path: root/drivers/net/wan
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/wan
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/wan')
-rw-r--r--  drivers/net/wan/Kconfig | 607
-rw-r--r--  drivers/net/wan/Makefile | 86
-rw-r--r--  drivers/net/wan/c101.c | 446
-rw-r--r--  drivers/net/wan/cosa.c | 2100
-rw-r--r--  drivers/net/wan/cosa.h | 117
-rw-r--r--  drivers/net/wan/cycx_drv.c | 586
-rw-r--r--  drivers/net/wan/cycx_main.c | 351
-rw-r--r--  drivers/net/wan/cycx_x25.c | 1609
-rw-r--r--  drivers/net/wan/dlci.c | 566
-rw-r--r--  drivers/net/wan/dscc4.c | 2074
-rw-r--r--  drivers/net/wan/farsync.c | 2712
-rw-r--r--  drivers/net/wan/farsync.h | 357
-rw-r--r--  drivers/net/wan/hd64570.h | 241
-rw-r--r--  drivers/net/wan/hd64572.h | 527
-rw-r--r--  drivers/net/wan/hd6457x.c | 853
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 330
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 1237
-rw-r--r--  drivers/net/wan/hdlc_generic.c | 343
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 115
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 90
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 107
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 219
-rw-r--r--  drivers/net/wan/hostess_sv11.c | 420
-rw-r--r--  drivers/net/wan/lapbether.c | 465
-rw-r--r--  drivers/net/wan/lmc/Makefile | 17
-rw-r--r--  drivers/net/wan/lmc/lmc.h | 33
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.c | 85
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.h | 52
-rw-r--r--  drivers/net/wan/lmc/lmc_ioctl.h | 257
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 2201
-rw-r--r--  drivers/net/wan/lmc/lmc_media.c | 1246
-rw-r--r--  drivers/net/wan/lmc/lmc_media.h | 65
-rw-r--r--  drivers/net/wan/lmc/lmc_prot.h | 15
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.c | 249
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.h | 16
-rw-r--r--  drivers/net/wan/lmc/lmc_var.h | 570
-rw-r--r--  drivers/net/wan/n2.c | 562
-rw-r--r--  drivers/net/wan/pc300-falc-lh.h | 1238
-rw-r--r--  drivers/net/wan/pc300.h | 497
-rw-r--r--  drivers/net/wan/pc300_drv.c | 3692
-rw-r--r--  drivers/net/wan/pc300_tty.c | 1095
-rw-r--r--  drivers/net/wan/pci200syn.c | 488
-rw-r--r--  drivers/net/wan/sbni.c | 1735
-rw-r--r--  drivers/net/wan/sbni.h | 141
-rw-r--r--  drivers/net/wan/sdla.c | 1676
-rw-r--r--  drivers/net/wan/sdla_chdlc.c | 4433
-rw-r--r--  drivers/net/wan/sdla_fr.c | 5068
-rw-r--r--  drivers/net/wan/sdla_ft1.c | 344
-rw-r--r--  drivers/net/wan/sdla_ppp.c | 3429
-rw-r--r--  drivers/net/wan/sdla_x25.c | 5496
-rw-r--r--  drivers/net/wan/sdladrv.c | 2318
-rw-r--r--  drivers/net/wan/sdlamain.c | 1341
-rw-r--r--  drivers/net/wan/sealevel.c | 469
-rw-r--r--  drivers/net/wan/syncppp.c | 1488
-rw-r--r--  drivers/net/wan/wanpipe_multppp.c | 2357
-rw-r--r--  drivers/net/wan/wanxl.c | 839
-rw-r--r--  drivers/net/wan/wanxl.h | 152
-rw-r--r--  drivers/net/wan/wanxlfw.S | 895
-rw-r--r--  drivers/net/wan/wanxlfw.inc_shipped | 158
-rw-r--r--  drivers/net/wan/x25_asy.c | 844
-rw-r--r--  drivers/net/wan/x25_asy.h | 50
-rw-r--r--  drivers/net/wan/z85230.c | 1851
-rw-r--r--  drivers/net/wan/z85230.h | 449
63 files changed, 64469 insertions, 0 deletions
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
new file mode 100644
index 000000000000..35791934a602
--- /dev/null
+++ b/drivers/net/wan/Kconfig
@@ -0,0 +1,607 @@
1#
2# wan devices configuration
3#
4
5menu "Wan interfaces"
6 depends on NETDEVICES
7
8config WAN
9 bool "Wan interfaces support"
10 ---help---
11 Wide Area Networks (WANs), such as X.25, Frame Relay and leased
12 lines, are used to interconnect Local Area Networks (LANs) over vast
13 distances with data transfer rates significantly higher than those
14 achievable with commonly used asynchronous modem connections.
15
16 Usually, a quite expensive external device called a `WAN router' is
17 needed to connect to a WAN. As an alternative, a relatively
18 inexpensive WAN interface card can allow your Linux box to directly
19 connect to a WAN.
20
21 If you have one of those cards and wish to use it under Linux,
22 say Y here and also to the WAN driver for your card.
23
24 If unsure, say N.
25
26# There is no way to detect a comtrol sv11 - force it modular for now.
27config HOSTESS_SV11
28 tristate "Comtrol Hostess SV-11 support"
29 depends on WAN && ISA && m
30 help
31 Driver for the Comtrol Hostess SV-11 network card, which
32 operates on low-speed synchronous serial links at up to
33 256 Kbps, supporting PPP and Cisco HDLC.
34
35 The driver will be compiled as a module: the
36 module will be called hostess_sv11.
37
38# The COSA/SRP driver has not been tested as non-modular yet.
39config COSA
40 tristate "COSA/SRP sync serial boards support"
41 depends on WAN && ISA && m
42 ---help---
43 Driver for COSA and SRP synchronous serial boards.
44
45 These boards allow you to connect synchronous serial devices (for example
46 base-band modems, or any other device with the X.21, V.24, V.35 or
47 V.36 interface) to your Linux box. The cards can work as a
48 character device, a synchronous PPP network device, or a Cisco HDLC
49 network device.
50
51 You will need user-space utilities for the COSA or SRP boards to download
52 the firmware to the cards and to set them up. Look at
53 <http://www.fi.muni.cz/~kas/cosa/> for more information. You can also
54 read the comment at the top of the <file:drivers/net/wan/cosa.c> for
55 details about the cards and the driver itself.
56
57 The driver will be compiled as a module: the
58 module will be called cosa.
59
60config DSCC4
61 tristate "Etinc PCISYNC serial board support"
62 depends on WAN && PCI && m
63 help
64 Driver for Etinc PCISYNC boards based on the Infineon (ex. Siemens)
65 DSCC4 chipset.
66
67 This is supposed to work with the four port card. Take a look at
68 <http://www.cogenit.fr/dscc4/> for further information about the
69 driver.
70
71 To compile this driver as a module, choose M here: the
72 module will be called dscc4.
73
74config DSCC4_PCISYNC
75 bool "Etinc PCISYNC features"
76 depends on DSCC4
77 help
78 Due to Etinc's design choice for its PCISYNC cards, some operations
79 are only allowed on specific ports of the DSCC4. This option is the
80 only way for the driver to know that it shouldn't return a success
81 code for these operations.
82
83 Please say Y if your card is an Etinc PCISYNC.
84
85config DSCC4_PCI_RST
86 bool "Hard reset support"
87 depends on DSCC4
88 help
89 Various DSCC4 bugs forbid any reliable software reset of the ASIC.
90 As a replacement, some vendors provide a way to assert the PCI #RST
91 pin of DSCC4 through the GPIO port of the card. If you choose Y,
92 the driver will make use of this feature before module removal
93 (i.e. rmmod). The feature is known to be available on Commtech's
94 cards. Contact your manufacturer for details.
95
96 Say Y if your card supports this feature.
97
98#
99# Lan Media's board. Currently 1000, 1200, 5200, 5245
100#
101config LANMEDIA
102 tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards"
103 depends on WAN && PCI
104 ---help---
105 Driver for the following Lan Media family of serial boards:
106
107 - LMC 1000 board allows you to connect synchronous serial devices
108 (for example base-band modems, or any other device with the X.21,
109 V.24, V.35 or V.36 interface) to your Linux box.
110
111 - LMC 1200 with on board DSU board allows you to connect your Linux
112 box directly to a T1 or E1 circuit.
113
114 - LMC 5200 board provides a HSSI interface capable of running up to
115 52 Mbits per second.
116
117 - LMC 5245 board connects directly to a T3 circuit saving the
118 additional external hardware.
119
120 To change settings such as syncPPP vs Cisco HDLC or clock source you
121 will need lmcctl. It is available at <ftp://ftp.lanmedia.com/>
122 (broken link).
123
124 To compile this driver as a module, choose M here: the
125 module will be called lmc.
126
127# There is no way to detect a Sealevel board. Force it modular
128config SEALEVEL_4021
129 tristate "Sealevel Systems 4021 support"
130 depends on WAN && ISA && m
131 help
132 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
133
134 The driver will be compiled as a module: the
135 module will be called sealevel.
136
137config SYNCLINK_SYNCPPP
138 tristate "SyncLink HDLC/SYNCPPP support"
139 depends on WAN
140 help
141 Enables HDLC/SYNCPPP support for the SyncLink WAN driver.
142
143 Normally the SyncLink WAN driver works with the main PPP driver
144 <file:drivers/net/ppp_generic.c> and pppd program.
145 HDLC/SYNCPPP support allows use of the Cisco HDLC/PPP driver
146 <file:drivers/net/wan/syncppp.c>. The SyncLink WAN driver (in
147 character devices) must also be enabled.
148
149# Generic HDLC
150config HDLC
151 tristate "Generic HDLC layer"
152 depends on WAN
153 help
154 Say Y to this option if your Linux box contains a WAN (Wide Area
155 Network) card supported by this driver and you are planning to
156 connect the box to a WAN.
157
158 You will need supporting software from
159 <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
160 Generic HDLC driver currently supports raw HDLC, Cisco HDLC, Frame
161 Relay, synchronous Point-to-Point Protocol (PPP) and X.25.
162
163 To compile this driver as a module, choose M here: the
164 module will be called hdlc.
165
166 If unsure, say N.
167
168config HDLC_RAW
169 bool "Raw HDLC support"
170 depends on HDLC
171 help
172 Generic HDLC driver supporting raw HDLC over WAN connections.
173
174 If unsure, say N.
175
176config HDLC_RAW_ETH
177 bool "Raw HDLC Ethernet device support"
178 depends on HDLC
179 help
180 Generic HDLC driver supporting raw HDLC Ethernet device emulation
181 over WAN connections.
182
183 You will need it for Ethernet over HDLC bridges.
184
185 If unsure, say N.
186
187config HDLC_CISCO
188 bool "Cisco HDLC support"
189 depends on HDLC
190 help
191 Generic HDLC driver supporting Cisco HDLC over WAN connections.
192
193 If unsure, say N.
194
195config HDLC_FR
196 bool "Frame Relay support"
197 depends on HDLC
198 help
199 Generic HDLC driver supporting Frame Relay over WAN connections.
200
201 If unsure, say N.
202
203config HDLC_PPP
204 bool "Synchronous Point-to-Point Protocol (PPP) support"
205 depends on HDLC
206 help
207 Generic HDLC driver supporting PPP over WAN connections.
208
209 If unsure, say N.
210
211config HDLC_X25
212 bool "X.25 protocol support"
213 depends on HDLC && (LAPB=m && HDLC=m || LAPB=y)
214 help
215 Generic HDLC driver supporting X.25 over WAN connections.
216
217 If unsure, say N.
218
219comment "X.25/LAPB support is disabled"
220 depends on WAN && HDLC && (LAPB!=m || HDLC!=m) && LAPB!=y
221
222config PCI200SYN
223 tristate "Goramo PCI200SYN support"
224 depends on HDLC && PCI
225 help
226 Driver for PCI200SYN cards by Goramo sp. j.
227
228 If you have such a card, say Y here and see
229 <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
230
231 To compile this as a module, choose M here: the
232 module will be called pci200syn.
233
234 If unsure, say N.
235
236config WANXL
237 tristate "SBE Inc. wanXL support"
238 depends on HDLC && PCI
239 help
240 Driver for wanXL PCI cards by SBE Inc.
241
242 If you have such a card, say Y here and see
243 <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
244
245 To compile this as a module, choose M here: the
246 module will be called wanxl.
247
248 If unsure, say N.
249
250config WANXL_BUILD_FIRMWARE
251 bool "rebuild wanXL firmware"
252 depends on WANXL && !PREVENT_FIRMWARE_BUILD
253 help
254 Allows you to rebuild firmware run by the QUICC processor.
255 It requires the as68k, ld68k and hexdump programs.
256
257 You should never need this option, say N.
258
259config PC300
260 tristate "Cyclades-PC300 support (RS-232/V.35, X.21, T1/E1 boards)"
261 depends on HDLC && PCI
262 ---help---
263 Driver for the Cyclades-PC300 synchronous communication boards.
264
265 These boards provide synchronous serial interfaces to your
266 Linux box (interfaces currently available are RS-232/V.35, X.21 and
267 T1/E1). If you wish to support Multilink PPP, please select the
268 option below and read the file README.mlppp provided by the PC300
269 package.
270
271 To compile this as a module, choose M here: the module
272 will be called pc300.
273
274 If unsure, say N.
275
276config PC300_MLPPP
277 bool "Cyclades-PC300 MLPPP support"
278 depends on PC300 && PPP_MULTILINK && PPP_SYNC_TTY && HDLC_PPP
279 help
280 Multilink PPP over the PC300 synchronous communication boards.
281
282comment "Cyclades-PC300 MLPPP support is disabled."
283 depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
284
285comment "Refer to the file README.mlppp, provided by PC300 package."
286 depends on WAN && HDLC && PC300 && (PPP=n || !PPP_MULTILINK || PPP_SYNC_TTY=n || !HDLC_PPP)
287
288config N2
289 tristate "SDL RISCom/N2 support"
290 depends on HDLC && ISA
291 help
292 Driver for RISCom/N2 single or dual channel ISA cards by
293 SDL Communications Inc.
294
295 If you have such a card, say Y here and see
296 <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
297
298 Note that N2csu and N2dds cards are not supported by this driver.
299
300 To compile this driver as a module, choose M here: the module
301 will be called n2.
302
303 If unsure, say N.
304
305config C101
306 tristate "Moxa C101 support"
307 depends on HDLC && ISA
308 help
309 Driver for C101 SuperSync ISA cards by Moxa Technologies Co., Ltd.
310
311 If you have such a card, say Y here and see
312 <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
313
314 To compile this driver as a module, choose M here: the
315 module will be called c101.
316
317 If unsure, say N.
318
319config FARSYNC
320 tristate "FarSync T-Series support"
321 depends on HDLC && PCI
322 ---help---
323 Support for the FarSync T-Series X.21 (and V.35/V.24) cards by
324 FarSite Communications Ltd.
325
326 Synchronous communication is supported on all ports at speeds up to
327 8Mb/s (128K on V.24) using synchronous PPP, Cisco HDLC, raw HDLC,
328 Frame Relay or X.25/LAPB.
329
330 If you want the module to be automatically loaded when the interface
331 is referenced then you should add "alias hdlcX farsync" to
332 /etc/modprobe.conf for each interface, where X is 0, 1, 2, ..., or
333 simply use "alias hdlc* farsync" to indicate all of them.
334
335 To compile this driver as a module, choose M here: the
336 module will be called farsync.
337
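As a concrete illustration of the alias lines suggested above, a hypothetical /etc/modprobe.conf fragment for two interfaces named hdlc0 and hdlc1 could look like this (the interface names are examples only):

    # /etc/modprobe.conf - load farsync when an hdlc interface is referenced
    alias hdlc0 farsync
    alias hdlc1 farsync
    # or, covering every interface with one wildcard entry:
    alias hdlc* farsync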
338config DLCI
339 tristate "Frame Relay DLCI support"
340 depends on WAN
341 ---help---
342 Support for the Frame Relay protocol.
343
344 Frame Relay is a fast low-cost way to connect to a remote Internet
345 access provider or to form a private wide area network. The one
346 physical line from your box to the local "switch" (i.e. the entry
347 point to the Frame Relay network, usually at the phone company) can
348 carry several logical point-to-point connections to other computers
349 connected to the Frame Relay network. For a general explanation of
350 the protocol, check out <http://www.mplsforum.org/>.
351
352 To use frame relay, you need supporting hardware (called FRAD) and
353 certain programs from the net-tools package as explained in
354 <file:Documentation/networking/framerelay.txt>.
355
356 To compile this driver as a module, choose M here: the
357 module will be called dlci.
358
359config DLCI_COUNT
360 int "Max open DLCI"
361 depends on DLCI
362 default "24"
363 help
364 Maximum number of logical point-to-point frame relay connections
365 (the identifiers of which are called DLCIs) that the driver can
366 handle.
367
368 The default is probably fine.
369
370config DLCI_MAX
371 int "Max DLCI per device"
372 depends on DLCI
373 default "8"
374 help
375 How many logical point-to-point frame relay connections (the
376 identifiers of which are called DLCIs) should be handled by each
377 of your hardware frame relay access devices.
378
379 Go with the default.
380
381config SDLA
382 tristate "SDLA (Sangoma S502/S508) support"
383 depends on DLCI && ISA
384 help
385 Driver for the Sangoma S502A, S502E, and S508 Frame Relay Access
386 Devices.
387
388 These are multi-protocol cards, but only Frame Relay is supported
389 by the driver at this time. Please read
390 <file:Documentation/networking/framerelay.txt>.
391
392 To compile this driver as a module, choose M here: the
393 module will be called sdla.
394
395# Wan router core.
396config WAN_ROUTER_DRIVERS
397 bool "WAN router drivers"
398 depends on WAN && WAN_ROUTER
399 ---help---
400 Connect LAN to WAN via Linux box.
401
402 Select the driver for your card and remember to say Y to "Wan Router."
403 You will need the wan-tools package which is available from
404 <ftp://ftp.sangoma.com/>. For more information read:
405 <file:Documentation/networking/wan-router.txt>.
406
407 Note that the answer to this question won't directly affect the
408 kernel: saying N will just cause the configurator to skip all
409 the questions about WAN router drivers.
410
411 If unsure, say N.
412
413config VENDOR_SANGOMA
414 tristate "Sangoma WANPIPE(tm) multiprotocol cards"
415 depends on WAN_ROUTER_DRIVERS && WAN_ROUTER && (PCI || ISA) && BROKEN
416 ---help---
417 Driver for S514-PCI/ISA Synchronous Data Link Adapters (SDLA).
418
419 WANPIPE from Sangoma Technologies Inc. <http://www.sangoma.com/>
420 is a family of intelligent multiprotocol WAN adapters with data
421 transfer rates up to 4Mbps. Cards support:
422
423 - X.25, Frame Relay, PPP, Cisco HDLC protocols.
424
425 - API for protocols like HDLC (LAPB), HDLC Streaming, X.25,
426 Frame Relay and BiSync.
427
428 - Ethernet Bridging over Frame Relay protocol.
429
430 - MULTILINK PPP
431
432 - Async PPP (Modem Dialup)
433
434 The next questions will ask you about the protocols you want
435 the driver to support.
436
437 If you have one or more of these cards, say M to this option;
438 and read <file:Documentation/networking/wanpipe.txt>.
439
440 To compile this driver as a module, choose M here: the
441 module will be called wanpipe.
442
443config WANPIPE_CHDLC
444 bool "WANPIPE Cisco HDLC support"
445 depends on VENDOR_SANGOMA
446 ---help---
447 Connect a WANPIPE card to a leased line using the Cisco HDLC.
448
449 - Supports Dual Port Cisco HDLC on the S514-PCI/S508-ISA cards
450 which allows the user to build applications using the HDLC streaming API.
451
452 - CHDLC Streaming MULTILINK PPP that can bind multiple WANPIPE T1
453 cards into a single logical channel.
454
455 Say Y and the Cisco HDLC support, HDLC streaming API and
456 MULTILINK PPP will be included in the driver.
457
458config WANPIPE_FR
459 bool "WANPIPE Frame Relay support"
460 depends on VENDOR_SANGOMA
461 help
462 Connect a WANPIPE card to a Frame Relay network, or use the Frame Relay
463 API to develop custom applications.
464
465 Contains the Ethernet Bridging over Frame Relay feature, where
466 a WANPIPE frame relay link can be directly connected to the Linux
467 kernel bridge. The Frame Relay option is supported on S514-PCI
468 and S508-ISA cards.
469
470 Say Y and the Frame Relay support will be included in the driver.
471
472config WANPIPE_X25
473 bool "WANPIPE X.25 support"
474 depends on VENDOR_SANGOMA
475 help
476 Connect a WANPIPE card to an X.25 network.
477
478 Includes the X.25 API support for custom applications over the
479 X.25 protocol. The X.25 option is supported on S514-PCI and
480 S508-ISA cards.
481
482 Say Y and the X.25 support will be included in the driver.
483
484config WANPIPE_PPP
485 bool "WANPIPE PPP support"
486 depends on VENDOR_SANGOMA
487 help
488 Connect a WANPIPE card to a leased line using Point-to-Point
489 Protocol (PPP).
490
491 The PPP option is supported on S514-PCI/S508-ISA cards.
492
493 Say Y and the PPP support will be included in the driver.
494
495config WANPIPE_MULTPPP
496 bool "WANPIPE Multi-Port PPP support"
497 depends on VENDOR_SANGOMA
498 help
499 Connect a WANPIPE card to a leased line using Point-to-Point
500 Protocol (PPP).
501
502 Uses in-kernel SyncPPP protocol over the Sangoma HDLC Streaming
503 adapter. In this case each Sangoma adapter port can support an
504 independent PPP connection. For example, a single Quad-Port PCI
505 adapter can support up to four independent PPP links. The PPP
506 option is supported on S514-PCI/S508-ISA cards.
507
508 Say Y and the Multi-Port PPP support will be included in the driver.
509
510config CYCLADES_SYNC
511 tristate "Cyclom 2X(tm) cards (EXPERIMENTAL)"
512 depends on WAN_ROUTER_DRIVERS && (PCI || ISA)
513 ---help---
514 Cyclom 2X from Cyclades Corporation <http://www.cyclades.com/> is an
515 intelligent multiprotocol WAN adapter with data transfer rates up to
516 512 Kbps. These cards support the X.25 and SNA related protocols.
517
518 While no documentation is available at this time, please grab the
519 wanconfig tarball at
520 <http://www.conectiva.com.br/~acme/cycsyn-devel/> (with minor changes
521 to make it compile with the current wanrouter include files; efforts
522 are being made to use the original package available at
523 <ftp://ftp.sangoma.com/>).
524
525 Feel free to contact me or the cycsyn-devel mailing list at
526 <acme@conectiva.com.br> and <cycsyn-devel@bazar.conectiva.com.br> for
527 additional details, I hope to have documentation available as soon as
528 possible. (Cyclades Brazil is writing the Documentation).
529
530 The next questions will ask you about the protocols you want the
531 driver to support (for now only X.25 is supported).
532
533 If you have one or more of these cards, say Y to this option.
534
535 To compile this driver as a module, choose M here: the
536 module will be called cyclomx.
537
538config CYCLOMX_X25
539 bool "Cyclom 2X X.25 support (EXPERIMENTAL)"
540 depends on CYCLADES_SYNC
541 help
542 Connect a Cyclom 2X card to an X.25 network.
543
544 Enabling X.25 support will enlarge your kernel by about 11 kB.
545
546# X.25 network drivers
547config LAPBETHER
548 tristate "LAPB over Ethernet driver (EXPERIMENTAL)"
549 depends on WAN && LAPB && X25
550 ---help---
551 Driver for a pseudo device (typically called /dev/lapb0) which allows
552 you to open an LAPB point-to-point connection to some other computer
553 on your Ethernet network.
554
555 In order to do this, you need to say Y or M to the driver for your
556 Ethernet card as well as to "LAPB Data Link Driver".
557
558 To compile this driver as a module, choose M here: the
559 module will be called lapbether.
560
561 If unsure, say N.
562
563config X25_ASY
564 tristate "X.25 async driver (EXPERIMENTAL)"
565 depends on WAN && LAPB && X25
566 ---help---
567 Send and receive X.25 frames over regular asynchronous serial
568 lines such as telephone lines equipped with ordinary modems.
569
570 Experts should note that this driver doesn't currently comply with
571 the asynchronous HDLC framing protocols in CCITT recommendation X.25.
572
573 To compile this driver as a module, choose M here: the
574 module will be called x25_asy.
575
576 If unsure, say N.
577
578config SBNI
579 tristate "Granch SBNI12 Leased Line adapter support"
580 depends on WAN && X86
581 ---help---
582 Driver for ISA SBNI12-xx cards which are low cost alternatives to
583 leased line modems.
584
585 You can find more information and the latest versions of the drivers and
586 utilities at <http://www.granch.ru/>. If you have any questions you
587 can send email to <sbni@granch.ru>.
588
589 To compile this driver as a module, choose M here: the
590 module will be called sbni.
591
592 If unsure, say N.
593
594config SBNI_MULTILINE
595 bool "Multiple line feature support"
596 depends on SBNI
597 help
598 Schedule traffic across several parallel lines via SBNI12 adapters.
599
600 If you have two computers connected with two parallel lines it's
601 possible to nearly double the transfer rate. You should have
602 a program named 'sbniconfig' to configure adapters.
603
604 If unsure, say N.
605
606endmenu
607
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
new file mode 100644
index 000000000000..ce6c56b903e7
--- /dev/null
+++ b/drivers/net/wan/Makefile
@@ -0,0 +1,86 @@
1#
2# Makefile for the Linux network (wan) device drivers.
3#
4# 3 Aug 2000, Christoph Hellwig <hch@infradead.org>
5# Rewritten to use lists instead of if-statements.
6#
7
8wanpipe-y := sdlamain.o sdla_ft1.o
9wanpipe-$(CONFIG_WANPIPE_X25) += sdla_x25.o
10wanpipe-$(CONFIG_WANPIPE_FR) += sdla_fr.o
11wanpipe-$(CONFIG_WANPIPE_CHDLC) += sdla_chdlc.o
12wanpipe-$(CONFIG_WANPIPE_PPP) += sdla_ppp.o
13wanpipe-$(CONFIG_WANPIPE_MULTPPP) += wanpipe_multppp.o
14wanpipe-objs := $(wanpipe-y)
15
16cyclomx-y := cycx_main.o
17cyclomx-$(CONFIG_CYCLOMX_X25) += cycx_x25.o
18cyclomx-objs := $(cyclomx-y)
19
20hdlc-y := hdlc_generic.o
21hdlc-$(CONFIG_HDLC_RAW) += hdlc_raw.o
22hdlc-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o
23hdlc-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o
24hdlc-$(CONFIG_HDLC_FR) += hdlc_fr.o
25hdlc-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
26hdlc-$(CONFIG_HDLC_X25) += hdlc_x25.o
27hdlc-objs := $(hdlc-y)
28
29pc300-y := pc300_drv.o
30pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o
31pc300-objs := $(pc300-y)
32
33obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o
34obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o
35obj-$(CONFIG_COSA) += syncppp.o cosa.o
36obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o
37obj-$(CONFIG_DSCC4) += dscc4.o
38obj-$(CONFIG_LANMEDIA) += syncppp.o
39obj-$(CONFIG_SYNCLINK_SYNCPPP) += syncppp.o
40obj-$(CONFIG_X25_ASY) += x25_asy.o
41
42obj-$(CONFIG_LANMEDIA) += lmc/
43
44obj-$(CONFIG_DLCI) += dlci.o
45obj-$(CONFIG_SDLA) += sdla.o
46ifeq ($(CONFIG_WANPIPE_MULTPPP),y)
47 obj-$(CONFIG_VENDOR_SANGOMA) += sdladrv.o wanpipe.o syncppp.o
48else
49 obj-$(CONFIG_VENDOR_SANGOMA) += sdladrv.o wanpipe.o
50endif
51obj-$(CONFIG_CYCLADES_SYNC) += cycx_drv.o cyclomx.o
52obj-$(CONFIG_LAPBETHER) += lapbether.o
53obj-$(CONFIG_SBNI) += sbni.o
54obj-$(CONFIG_PC300) += pc300.o
55obj-$(CONFIG_HDLC) += hdlc.o
56ifeq ($(CONFIG_HDLC_PPP),y)
57 obj-$(CONFIG_HDLC) += syncppp.o
58endif
59obj-$(CONFIG_N2) += n2.o
60obj-$(CONFIG_C101) += c101.o
61obj-$(CONFIG_WANXL) += wanxl.o
62obj-$(CONFIG_PCI200SYN) += pci200syn.o
63
64clean-files := wanxlfw.inc
65$(obj)/wanxl.o: $(obj)/wanxlfw.inc
66
67ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
68ifeq ($(ARCH),m68k)
69 AS68K = $(AS)
70 LD68K = $(LD)
71else
72 AS68K = as68k
73 LD68K = ld68k
74endif
75
76quiet_cmd_build_wanxlfw = BLD FW $@
77 cmd_build_wanxlfw = \
78 $(CPP) -Wp,-MD,$(depfile) -I$(srctree)/include $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
79 $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
80 hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
81 rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
82
83$(obj)/wanxlfw.inc: $(src)/wanxlfw.S
84 $(call if_changed_dep,build_wanxlfw)
85targets += wanxlfw.inc
86endif
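For reference, the sed expression above wraps hexdump's comma-separated bytes into a C array, so the generated wanxlfw.inc should look roughly like this sketch (the byte values shown are placeholders, not real firmware):

    /* wanxlfw.inc (generated) - the firmware image as a C byte array */
    static u8 firmware[]={
    0x60,0x00,0x01,0x2C,0x00,0x00, /* ...16 bytes per line... */
    0x00
    };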
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
new file mode 100644
index 000000000000..43d854ace233
--- /dev/null
+++ b/drivers/net/wan/c101.c
@@ -0,0 +1,446 @@
1/*
2 * Moxa C101 synchronous serial card driver for Linux
3 *
4 * Copyright (C) 2000-2003 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * For information see http://hq.pm.waw.pl/hdlc/
11 *
12 * Sources of information:
13 * Hitachi HD64570 SCA User's Manual
14 * Moxa C101 User's Manual
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/types.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/init.h>
24#include <linux/moduleparam.h>
25#include <linux/netdevice.h>
26#include <linux/hdlc.h>
27#include <linux/delay.h>
28#include <asm/io.h>
29
30#include "hd64570.h"
31
32
33static const char* version = "Moxa C101 driver version: 1.15";
34static const char* devname = "C101";
35
36#undef DEBUG_PKT
37#define DEBUG_RINGS
38
39#define C101_PAGE 0x1D00
40#define C101_DTR 0x1E00
41#define C101_SCA 0x1F00
42#define C101_WINDOW_SIZE 0x2000
43#define C101_MAPPED_RAM_SIZE 0x4000
44
45#define RAM_SIZE (256 * 1024)
46#define TX_RING_BUFFERS 10
47#define RX_RING_BUFFERS ((RAM_SIZE - C101_WINDOW_SIZE) / \
48 (sizeof(pkt_desc) + HDLC_MAX_MRU) - TX_RING_BUFFERS)
49
50#define CLOCK_BASE 9830400 /* 9.8304 MHz */
51#define PAGE0_ALWAYS_MAPPED
52
53static char *hw; /* pointer to hw=xxx command line string */
54
55
56typedef struct card_s {
57 struct net_device *dev;
58 spinlock_t lock; /* TX lock */
59 u8 __iomem *win0base; /* ISA window base address */
60 u32 phy_winbase; /* ISA physical base address */
61 sync_serial_settings settings;
62 int rxpart; /* partial frame received, next frame invalid*/
63 unsigned short encoding;
64 unsigned short parity;
65 u16 rx_ring_buffers; /* number of buffers in a ring */
66 u16 tx_ring_buffers;
67 u16 buff_offset; /* offset of first buffer of first channel */
68 u16 rxin; /* rx ring buffer 'in' pointer */
69 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
70 u16 txlast;
71 u8 rxs, txs, tmc; /* SCA registers */
72 u8 irq; /* IRQ (3-15) */
73 u8 page;
74
75 struct card_s *next_card;
76}card_t;
77
78typedef card_t port_t;
79
80static card_t *first_card;
81static card_t **new_card = &first_card;
82
83
84#define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg))
85#define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg))
86#define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg))
87
88/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
89#define sca_outw(value, reg, card) do { \
90 writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
91 writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg+1));\
92} while(0)
93
94#define port_to_card(port) (port)
95#define log_node(port) (0)
96#define phy_node(port) (0)
97#define winsize(card) (C101_WINDOW_SIZE)
98#define win0base(card) ((card)->win0base)
99#define winbase(card) ((card)->win0base + 0x2000)
100#define get_port(card, port) (card)
101static void sca_msci_intr(port_t *port);
102
103
104static inline u8 sca_get_page(card_t *card)
105{
106 return card->page;
107}
108
109static inline void openwin(card_t *card, u8 page)
110{
111 card->page = page;
112 writeb(page, card->win0base + C101_PAGE);
113}
114
115
116#include "hd6457x.c"
117
118
119static void sca_msci_intr(port_t *port)
120{
121 struct net_device *dev = port_to_dev(port);
122 card_t* card = port_to_card(port);
123 u8 stat = sca_in(MSCI1_OFFSET + ST1, card); /* read MSCI ST1 status */
124
125 /* Reset MSCI TX underrun status bit */
126 sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, card);
127
128 if (stat & ST1_UDRN) {
129 struct net_device_stats *stats = hdlc_stats(dev);
130 stats->tx_errors++; /* TX Underrun error detected */
131 stats->tx_fifo_errors++;
132 }
133
134 /* Reset MSCI CDCD status bit - uses ch#2 DCD input */
135 sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, card);
136
137 if (stat & ST1_CDCD)
138 hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD),
139 dev);
140}
141
142
143static void c101_set_iface(port_t *port)
144{
145 u8 rxs = port->rxs & CLK_BRG_MASK;
146 u8 txs = port->txs & CLK_BRG_MASK;
147
148 switch(port->settings.clock_type) {
149 case CLOCK_INT:
150 rxs |= CLK_BRG_RX; /* TX clock */
151 txs |= CLK_RXCLK_TX; /* BRG output */
152 break;
153
154 case CLOCK_TXINT:
155 rxs |= CLK_LINE_RX; /* RXC input */
156 txs |= CLK_BRG_TX; /* BRG output */
157 break;
158
159 case CLOCK_TXFROMRX:
160 rxs |= CLK_LINE_RX; /* RXC input */
161 txs |= CLK_RXCLK_TX; /* RX clock */
162 break;
163
164 default: /* EXTernal clock */
165 rxs |= CLK_LINE_RX; /* RXC input */
166 txs |= CLK_LINE_TX; /* TXC input */
167 }
168
169 port->rxs = rxs;
170 port->txs = txs;
171 sca_out(rxs, MSCI1_OFFSET + RXS, port);
172 sca_out(txs, MSCI1_OFFSET + TXS, port);
173 sca_set_port(port);
174}
175
176
177static int c101_open(struct net_device *dev)
178{
179 port_t *port = dev_to_port(dev);
180 int result;
181
182 result = hdlc_open(dev);
183 if (result)
184 return result;
185
186 writeb(1, port->win0base + C101_DTR);
187 sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
188 sca_open(dev);
189 /* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */
190 sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
191 sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
192
193 hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD), dev);
194 printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));
195
196 /* enable MSCI1 CDCD interrupt */
197 sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
198 sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port);
199 sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */
200 c101_set_iface(port);
201 return 0;
202}
203
204
205static int c101_close(struct net_device *dev)
206{
207 port_t *port = dev_to_port(dev);
208
209 sca_close(dev);
210 writeb(0, port->win0base + C101_DTR);
211 sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
212 hdlc_close(dev);
213 return 0;
214}
215
216
217static int c101_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
218{
219 const size_t size = sizeof(sync_serial_settings);
220 sync_serial_settings new_line;
221 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
222 port_t *port = dev_to_port(dev);
223
224#ifdef DEBUG_RINGS
225 if (cmd == SIOCDEVPRIVATE) {
226 sca_dump_rings(dev);
227 printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n",
228 sca_in(MSCI1_OFFSET + ST0, port),
229 sca_in(MSCI1_OFFSET + ST1, port),
230 sca_in(MSCI1_OFFSET + ST2, port),
231 sca_in(MSCI1_OFFSET + ST3, port));
232 return 0;
233 }
234#endif
235 if (cmd != SIOCWANDEV)
236 return hdlc_ioctl(dev, ifr, cmd);
237
238 switch(ifr->ifr_settings.type) {
239 case IF_GET_IFACE:
240 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
241 if (ifr->ifr_settings.size < size) {
242 ifr->ifr_settings.size = size; /* data size wanted */
243 return -ENOBUFS;
244 }
245 if (copy_to_user(line, &port->settings, size))
246 return -EFAULT;
247 return 0;
248
249 case IF_IFACE_SYNC_SERIAL:
250 if(!capable(CAP_NET_ADMIN))
251 return -EPERM;
252
253 if (copy_from_user(&new_line, line, size))
254 return -EFAULT;
255
256 if (new_line.clock_type != CLOCK_EXT &&
257 new_line.clock_type != CLOCK_TXFROMRX &&
258 new_line.clock_type != CLOCK_INT &&
259 new_line.clock_type != CLOCK_TXINT)
260 return -EINVAL; /* No such clock setting */
261
262 if (new_line.loopback != 0 && new_line.loopback != 1)
263 return -EINVAL;
264
265 memcpy(&port->settings, &new_line, size); /* Update settings */
266 c101_set_iface(port);
267 return 0;
268
269 default:
270 return hdlc_ioctl(dev, ifr, cmd);
271 }
272}
273
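To illustrate how the SIOCWANDEV path handled above is exercised from user space, here is a minimal, hypothetical sketch that selects internal clocking on a port. It reuses only the ifr_settings fields and the sync_serial_settings structure already referenced in this function; the interface name, clock rate and exact header locations are assumptions (headers can differ between kernel versions), not part of this driver.

/* Hypothetical user-space sketch: set internal clock on "hdlc0" via SIOCWANDEV.
 * Needs CAP_NET_ADMIN, matching the capable() check in c101_ioctl() above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>	/* SIOCWANDEV */
#include <linux/if.h>		/* struct ifreq, IF_IFACE_SYNC_SERIAL */
#include <linux/hdlc/ioctl.h>	/* sync_serial_settings, CLOCK_INT */

int main(void)
{
	struct ifreq ifr;
	sync_serial_settings settings;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	memset(&settings, 0, sizeof(settings));
	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);

	settings.clock_type = CLOCK_INT;	/* BRG generates TX and RX clocks */
	settings.clock_rate = 64000;		/* example rate in bps */
	settings.loopback = 0;

	ifr.ifr_settings.type = IF_IFACE_SYNC_SERIAL;
	ifr.ifr_settings.size = sizeof(settings);
	ifr.ifr_settings.ifs_ifsu.sync = &settings;

	if (ioctl(fd, SIOCWANDEV, &ifr) < 0)
		perror("SIOCWANDEV");

	close(fd);
	return 0;
}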
274
275
276static void c101_destroy_card(card_t *card)
277{
278 readb(card->win0base + C101_PAGE); /* Resets SCA? */
279
280 if (card->irq)
281 free_irq(card->irq, card);
282
283 if (card->win0base) {
284 iounmap(card->win0base);
285 release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE);
286 }
287
288 free_netdev(card->dev);
289
290 kfree(card);
291}
292
293
294
295static int __init c101_run(unsigned long irq, unsigned long winbase)
296{
297 struct net_device *dev;
298 hdlc_device *hdlc;
299 card_t *card;
300 int result;
301
302 if (irq<3 || irq>15 || irq == 6) /* FIXME */ {
303 printk(KERN_ERR "c101: invalid IRQ value\n");
304 return -ENODEV;
305 }
306
307 if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) {
308 printk(KERN_ERR "c101: invalid RAM value\n");
309 return -ENODEV;
310 }
311
312 card = kmalloc(sizeof(card_t), GFP_KERNEL);
313 if (card == NULL) {
314 printk(KERN_ERR "c101: unable to allocate memory\n");
315 return -ENOBUFS;
316 }
317 memset(card, 0, sizeof(card_t));
318
319 card->dev = alloc_hdlcdev(card);
320 if (!card->dev) {
321 printk(KERN_ERR "c101: unable to allocate memory\n");
322 kfree(card);
323 return -ENOBUFS;
324 }
325
326 if (request_irq(irq, sca_intr, 0, devname, card)) {
327 printk(KERN_ERR "c101: could not allocate IRQ\n");
328 c101_destroy_card(card);
329 return(-EBUSY);
330 }
331 card->irq = irq;
332
333 if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
334 printk(KERN_ERR "c101: could not request RAM window\n");
335 c101_destroy_card(card);
336 return(-EBUSY);
337 }
338 card->phy_winbase = winbase;
339 card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
340 if (!card->win0base) {
341 printk(KERN_ERR "c101: could not map I/O address\n");
342 c101_destroy_card(card);
343 return -EBUSY;
344 }
345
346 card->tx_ring_buffers = TX_RING_BUFFERS;
347 card->rx_ring_buffers = RX_RING_BUFFERS;
348 card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */
349
350 readb(card->win0base + C101_PAGE); /* Resets SCA? */
351 udelay(100);
352 writeb(0, card->win0base + C101_PAGE);
353 writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */
354
355 sca_init(card, 0);
356
357 dev = port_to_dev(card);
358 hdlc = dev_to_hdlc(dev);
359
360 spin_lock_init(&card->lock);
361 SET_MODULE_OWNER(dev);
362 dev->irq = irq;
363 dev->mem_start = winbase;
364 dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
365 dev->tx_queue_len = 50;
366 dev->do_ioctl = c101_ioctl;
367 dev->open = c101_open;
368 dev->stop = c101_close;
369 hdlc->attach = sca_attach;
370 hdlc->xmit = sca_xmit;
371 card->settings.clock_type = CLOCK_EXT;
372
373 result = register_hdlc_device(dev);
374 if (result) {
375 printk(KERN_WARNING "c101: unable to register hdlc device\n");
376 c101_destroy_card(card);
377 return result;
378 }
379
380 sca_init_sync_port(card); /* Set up C101 memory */
381 hdlc_set_carrier(!(sca_in(MSCI1_OFFSET + ST3, card) & ST3_DCD), dev);
382
383 printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
384 " using %u TX + %u RX packets rings\n",
385 dev->name, card->irq,
386 card->tx_ring_buffers, card->rx_ring_buffers);
387
388 *new_card = card;
389 new_card = &card->next_card;
390 return 0;
391}
392
393
394
395static int __init c101_init(void)
396{
397 if (hw == NULL) {
398#ifdef MODULE
399 printk(KERN_INFO "c101: no card initialized\n");
400#endif
401 return -ENOSYS; /* no parameters specified, abort */
402 }
403
404 printk(KERN_INFO "%s\n", version);
405
406 do {
407 unsigned long irq, ram;
408
409 irq = simple_strtoul(hw, &hw, 0);
410
411 if (*hw++ != ',')
412 break;
413 ram = simple_strtoul(hw, &hw, 0);
414
415 if (*hw == ':' || *hw == '\x0')
416 c101_run(irq, ram);
417
418 if (*hw == '\x0')
419 return first_card ? 0 : -ENOSYS;
420 }while(*hw++ == ':');
421
422 printk(KERN_ERR "c101: invalid hardware parameters\n");
423 return first_card ? 0 : -ENOSYS;
424}
425
426
427static void __exit c101_cleanup(void)
428{
429 card_t *card = first_card;
430
431 while (card) {
432 card_t *ptr = card;
433 card = card->next_card;
434 unregister_hdlc_device(port_to_dev(ptr));
435 c101_destroy_card(ptr);
436 }
437}
438
439
440module_init(c101_init);
441module_exit(c101_cleanup);
442
443MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
444MODULE_DESCRIPTION("Moxa C101 serial port driver");
445MODULE_LICENSE("GPL v2");
446module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */
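Going by the parser in c101_init() above, hw takes colon-separated irq,ram pairs, with the IRQ in the 3-15 range (but not 6) and the RAM window between 0xC0000 and 0xDFFFF on a 0x4000 boundary. A load command might therefore look like the following; the specific IRQ and address values are purely illustrative:

    # two hypothetical cards: IRQ 10 at 0xC8000 and IRQ 11 at 0xD0000
    modprobe c101 hw=10,0xC8000:11,0xD0000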
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
new file mode 100644
index 000000000000..921a573372e9
--- /dev/null
+++ b/drivers/net/wan/cosa.c
@@ -0,0 +1,2100 @@
1/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
2
3/*
4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21/*
22 * The driver for the SRP and COSA synchronous serial cards.
23 *
24 * HARDWARE INFO
25 *
26 * Both cards are developed at the Institute of Computer Science,
27 * Masaryk University (http://www.ics.muni.cz/). The hardware is
28 * developed by Jiri Novotny <novotny@ics.muni.cz>. More information
29 * and the photo of both cards is available at
30 * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares
31 * and other goods can be downloaded from ftp://ftp.ics.muni.cz/pub/cosa/.
32 * For Linux-specific utilities, see below in the "Software info" section.
33 * If you want to order the card, contact Jiri Novotny.
34 *
35 * The SRP (serial port?, the Czech word "srp" means "sickle") card
36 * is a 2-port intelligent (with its own 8-bit CPU) synchronous serial card
37 * with V.24 interfaces up to 80kb/s each.
38 *
39 * The COSA (communication serial adapter?, the Czech word "kosa" means
40 * "scythe") is a next-generation sync/async board with two interfaces
41 * - currently any of V.24, X.21, V.35 and V.36 can be selected.
42 * It has a 16-bit SAB80166 CPU and can do up to 10 Mb/s per channel.
43 * The 8-channel version is in development.
44 *
45 * Both types have downloadable firmware and communicate via ISA DMA.
46 * COSA can be also a bus-mastering device.
47 *
48 * SOFTWARE INFO
49 *
50 * The homepage of the Linux driver is at http://www.fi.muni.cz/~kas/cosa/.
51 * The CVS tree of Linux driver can be viewed there, as well as the
52 * firmware binaries and user-space utilities for downloading the firmware
53 * into the card and setting up the card.
54 *
55 * The Linux driver (unlike the present *BSD drivers :-) can work even
56 * with a COSA and an SRP in one computer, and allows each channel to work
57 * in one of three modes (character device, Cisco HDLC, Sync PPP).
58 *
59 * AUTHOR
60 *
61 * The Linux driver was written by Jan "Yenya" Kasprzak <kas@fi.muni.cz>.
62 *
63 * You can mail me bugfixes and even success reports. I am especially
64 * interested in the SMP and/or multi-channel success/failure reports
65 * (I wonder if I did the locking properly :-).
66 *
67 * THE AUTHOR USED THE FOLLOWING SOURCES WHEN PROGRAMMING THE DRIVER
68 *
69 * The COSA/SRP NetBSD driver by Zdenek Salvet and Ivos Cernohlavek
70 * The skeleton.c by Donald Becker
71 * The SDL Riscom/N2 driver by Mike Natale
72 * The Comtrol Hostess SV11 driver by Alan Cox
73 * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox
74 */
75/*
76 * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br>
77 * fixed a deadlock in cosa_sppp_open
78 */
79
80/* ---------- Headers, macros, data structures ---------- */
81
82#include <linux/config.h>
83#include <linux/module.h>
84#include <linux/kernel.h>
85#include <linux/slab.h>
86#include <linux/poll.h>
87#include <linux/fs.h>
88#include <linux/devfs_fs_kernel.h>
89#include <linux/interrupt.h>
90#include <linux/delay.h>
91#include <linux/errno.h>
92#include <linux/ioport.h>
93#include <linux/netdevice.h>
94#include <linux/spinlock.h>
95#include <linux/smp_lock.h>
96#include <linux/device.h>
97
98#undef COSA_SLOW_IO /* for testing purposes only */
99#undef REALLY_SLOW_IO
100
101#include <asm/io.h>
102#include <asm/dma.h>
103#include <asm/byteorder.h>
104
105#include <net/syncppp.h>
106#include "cosa.h"
107
108/* Maximum length of the identification string. */
109#define COSA_MAX_ID_STRING 128
110
111/* Maximum length of the channel name */
112#define COSA_MAX_NAME (sizeof("cosaXXXcXXX")+1)
113
114/* Per-channel data structure */
115
116struct channel_data {
117 void *if_ptr; /* General purpose pointer (used by SPPP) */
118 int usage; /* Usage count; >0 for chrdev, -1 for netdev */
119 int num; /* Number of the channel */
120 struct cosa_data *cosa; /* Pointer to the per-card structure */
121 int txsize; /* Size of transmitted data */
122 char *txbuf; /* Transmit buffer */
123 char name[COSA_MAX_NAME]; /* channel name */
124
125 /* The HW layer interface */
126 /* routine called from the RX interrupt */
127 char *(*setup_rx)(struct channel_data *channel, int size);
128 /* routine called when the RX is done (from the EOT interrupt) */
129 int (*rx_done)(struct channel_data *channel);
130 /* routine called when the TX is done (from the EOT interrupt) */
131 int (*tx_done)(struct channel_data *channel, int size);
132
133 /* Character device parts */
134 struct semaphore rsem, wsem;
135 char *rxdata;
136 int rxsize;
137 wait_queue_head_t txwaitq, rxwaitq;
138 int tx_status, rx_status;
139
140 /* SPPP/HDLC device parts */
141 struct ppp_device pppdev;
142 struct sk_buff *rx_skb, *tx_skb;
143 struct net_device_stats stats;
144};
145
146/* cosa->firmware_status bits */
147#define COSA_FW_RESET (1<<0) /* Is the ROM monitor active? */
148#define COSA_FW_DOWNLOAD (1<<1) /* Is the microcode downloaded? */
149#define COSA_FW_START (1<<2) /* Is the microcode running? */
150
151struct cosa_data {
152 int num; /* Card number */
153 char name[COSA_MAX_NAME]; /* Card name - e.g "cosa0" */
154 unsigned int datareg, statusreg; /* I/O ports */
155 unsigned short irq, dma; /* IRQ and DMA number */
156 unsigned short startaddr; /* Firmware start address */
157 unsigned short busmaster; /* Use busmastering? */
158 int nchannels; /* # of channels on this card */
159 int driver_status; /* For communicating with firmware */
160 int firmware_status; /* Downloaded, reset, etc. */
161 long int rxbitmap, txbitmap; /* Bitmap of channels who are willing to send/receive data */
162 long int rxtx; /* RX or TX in progress? */
163 int enabled;
164 int usage; /* usage count */
165 int txchan, txsize, rxsize;
166 struct channel_data *rxchan;
167 char *bouncebuf;
168 char *txbuf, *rxbuf;
169 struct channel_data *chan;
170 spinlock_t lock; /* For exclusive operations on this structure */
171 char id_string[COSA_MAX_ID_STRING]; /* ROM monitor ID string */
172 char *type; /* card type */
173};
174
175/*
176 * Define this if you want all the possible ports to be autoprobed.
177 * It is here but it probably is not a good idea to use this.
178 */
179/* #define COSA_ISA_AUTOPROBE 1 */
180
181/*
182 * Character device major number. 117 was allocated for us.
183 * The value of 0 means to allocate a first free one.
184 */
185static int cosa_major = 117;
186
187/*
188 * Encoding of the minor numbers:
189 * The lowest CARD_MINOR_BITS bits mean the channel on a single card,
190 * the highest bits mean the card number.
191 */
192#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved
193 * for the single card */
194/*
195 * The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
196 * macro doesn't like anything other than the raw number as an argument :-(
197 */
198#define MAX_CARDS 16
199/* #define MAX_CARDS (1 << (8-CARD_MINOR_BITS)) */
200
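A minimal sketch of the minor-number split described in the comment above, with the card index in the high bits and the channel in the low CARD_MINOR_BITS bits; these helper names are illustrative and not part of the driver:

/* Illustrative decoding of a cosa character-device minor number. */
#define CHANNEL_MASK ((1 << CARD_MINOR_BITS) - 1)

static inline int minor_to_card(unsigned int minor)
{
	return minor >> CARD_MINOR_BITS;	/* e.g. minor 0x12 -> card 1 */
}

static inline int minor_to_channel(unsigned int minor)
{
	return minor & CHANNEL_MASK;		/* e.g. minor 0x12 -> channel 2 */
}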
201#define DRIVER_RX_READY 0x0001
202#define DRIVER_TX_READY 0x0002
203#define DRIVER_TXMAP_SHIFT 2
204#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */
205
206/*
207 * for cosa->rxtx - indicates whether either transmit or receive is
208 * in progress. These values are the bit numbers.
209 */
210#define TXBIT 0
211#define RXBIT 1
212#define IRQBIT 2
213
214#define COSA_MTU 2000 /* FIXME: I don't know this exactly */
215
216#undef DEBUG_DATA //1 /* Dump the data read or written to the channel */
217#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
218#undef DEBUG_IO //1 /* Dump the I/O traffic */
219
220#define TX_TIMEOUT (5*HZ)
221
222/* Maybe the following should be allocated dynamically */
223static struct cosa_data cosa_cards[MAX_CARDS];
224static int nr_cards;
225
226#ifdef COSA_ISA_AUTOPROBE
227static int io[MAX_CARDS+1] = { 0x220, 0x228, 0x210, 0x218, 0, };
228/* NOTE: DMA is not autoprobed!!! */
229static int dma[MAX_CARDS+1] = { 1, 7, 1, 7, 1, 7, 1, 7, 0, };
230#else
231static int io[MAX_CARDS+1];
232static int dma[MAX_CARDS+1];
233#endif
234/* IRQ can be safely autoprobed */
235static int irq[MAX_CARDS+1] = { -1, -1, -1, -1, -1, -1, 0, };
236
237/* for class stuff*/
238static struct class_simple *cosa_class;
239
240#ifdef MODULE
241module_param_array(io, int, NULL, 0);
242MODULE_PARM_DESC(io, "The I/O bases of the COSA or SRP cards");
243module_param_array(irq, int, NULL, 0);
244MODULE_PARM_DESC(irq, "The IRQ lines of the COSA or SRP cards");
245module_param_array(dma, int, NULL, 0);
246MODULE_PARM_DESC(dma, "The DMA channels of the COSA or SRP cards");
247
248MODULE_AUTHOR("Jan \"Yenya\" Kasprzak, <kas@fi.muni.cz>");
249MODULE_DESCRIPTION("Modular driver for the COSA or SRP synchronous card");
250MODULE_LICENSE("GPL");
251#endif
252
253/* I use this mainly for testing purposes */
254#ifdef COSA_SLOW_IO
255#define cosa_outb outb_p
256#define cosa_outw outw_p
257#define cosa_inb inb_p
258#define cosa_inw inw_p
259#else
260#define cosa_outb outb
261#define cosa_outw outw
262#define cosa_inb inb
263#define cosa_inw inw
264#endif
265
266#define is_8bit(cosa) (!(cosa->datareg & 0x08))
267
268#define cosa_getstatus(cosa) (cosa_inb(cosa->statusreg))
269#define cosa_putstatus(cosa, stat) (cosa_outb(stat, cosa->statusreg))
270#define cosa_getdata16(cosa) (cosa_inw(cosa->datareg))
271#define cosa_getdata8(cosa) (cosa_inb(cosa->datareg))
272#define cosa_putdata16(cosa, dt) (cosa_outw(dt, cosa->datareg))
273#define cosa_putdata8(cosa, dt) (cosa_outb(dt, cosa->datareg))
274
275/* Initialization stuff */
276static int cosa_probe(int ioaddr, int irq, int dma);
277
278/* HW interface */
279static void cosa_enable_rx(struct channel_data *chan);
280static void cosa_disable_rx(struct channel_data *chan);
281static int cosa_start_tx(struct channel_data *channel, char *buf, int size);
282static void cosa_kick(struct cosa_data *cosa);
283static int cosa_dma_able(struct channel_data *chan, char *buf, int data);
284
285/* SPPP/HDLC stuff */
286static void sppp_channel_init(struct channel_data *chan);
287static void sppp_channel_delete(struct channel_data *chan);
288static int cosa_sppp_open(struct net_device *d);
289static int cosa_sppp_close(struct net_device *d);
290static void cosa_sppp_timeout(struct net_device *d);
291static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d);
292static char *sppp_setup_rx(struct channel_data *channel, int size);
293static int sppp_rx_done(struct channel_data *channel);
294static int sppp_tx_done(struct channel_data *channel, int size);
295static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
296static struct net_device_stats *cosa_net_stats(struct net_device *dev);
297
298/* Character device */
299static void chardev_channel_init(struct channel_data *chan);
300static char *chrdev_setup_rx(struct channel_data *channel, int size);
301static int chrdev_rx_done(struct channel_data *channel);
302static int chrdev_tx_done(struct channel_data *channel, int size);
303static ssize_t cosa_read(struct file *file,
304 char __user *buf, size_t count, loff_t *ppos);
305static ssize_t cosa_write(struct file *file,
306 const char __user *buf, size_t count, loff_t *ppos);
307static unsigned int cosa_poll(struct file *file, poll_table *poll);
308static int cosa_open(struct inode *inode, struct file *file);
309static int cosa_release(struct inode *inode, struct file *file);
310static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
311 unsigned int cmd, unsigned long arg);
312#ifdef COSA_FASYNC_WORKING
313static int cosa_fasync(struct inode *inode, struct file *file, int on);
314#endif
315
316static struct file_operations cosa_fops = {
317 .owner = THIS_MODULE,
318 .llseek = no_llseek,
319 .read = cosa_read,
320 .write = cosa_write,
321 .poll = cosa_poll,
322 .ioctl = cosa_chardev_ioctl,
323 .open = cosa_open,
324 .release = cosa_release,
325#ifdef COSA_FASYNC_WORKING
326 .fasync = cosa_fasync,
327#endif
328};
329
330/* Ioctls */
331static int cosa_start(struct cosa_data *cosa, int address);
332static int cosa_reset(struct cosa_data *cosa);
333static int cosa_download(struct cosa_data *cosa, void __user *a);
334static int cosa_readmem(struct cosa_data *cosa, void __user *a);
335
336/* COSA/SRP ROM monitor */
337static int download(struct cosa_data *cosa, const char __user *data, int addr, int len);
338static int startmicrocode(struct cosa_data *cosa, int address);
339static int readmem(struct cosa_data *cosa, char __user *data, int addr, int len);
340static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id);
341
342/* Auxiliary functions */
343static int get_wait_data(struct cosa_data *cosa);
344static int put_wait_data(struct cosa_data *cosa, int data);
345static int puthexnumber(struct cosa_data *cosa, int number);
346static void put_driver_status(struct cosa_data *cosa);
347static void put_driver_status_nolock(struct cosa_data *cosa);
348
349/* Interrupt handling */
350static irqreturn_t cosa_interrupt(int irq, void *cosa, struct pt_regs *regs);
351
352/* I/O ops debugging */
353#ifdef DEBUG_IO
354static void debug_data_in(struct cosa_data *cosa, int data);
355static void debug_data_out(struct cosa_data *cosa, int data);
356static void debug_data_cmd(struct cosa_data *cosa, int data);
357static void debug_status_in(struct cosa_data *cosa, int status);
358static void debug_status_out(struct cosa_data *cosa, int status);
359#endif
360
361
362/* ---------- Initialization stuff ---------- */
363
364static int __init cosa_init(void)
365{
366 int i, err = 0;
367
368 printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n");
369#ifdef CONFIG_SMP
370 printk(KERN_INFO "cosa: SMP found. Please mail any success/failure reports to the author.\n");
371#endif
372 if (cosa_major > 0) {
373 if (register_chrdev(cosa_major, "cosa", &cosa_fops)) {
374 printk(KERN_WARNING "cosa: unable to get major %d\n",
375 cosa_major);
376 err = -EIO;
377 goto out;
378 }
379 } else {
380 if (!(cosa_major=register_chrdev(0, "cosa", &cosa_fops))) {
381 printk(KERN_WARNING "cosa: unable to register chardev\n");
382 err = -EIO;
383 goto out;
384 }
385 }
386 for (i=0; i<MAX_CARDS; i++)
387 cosa_cards[i].num = -1;
388 for (i=0; io[i] != 0 && i < MAX_CARDS; i++)
389 cosa_probe(io[i], irq[i], dma[i]);
390 if (!nr_cards) {
391 printk(KERN_WARNING "cosa: no devices found.\n");
392 unregister_chrdev(cosa_major, "cosa");
393 err = -ENODEV;
394 goto out;
395 }
396 devfs_mk_dir("cosa");
397 cosa_class = class_simple_create(THIS_MODULE, "cosa");
398 if (IS_ERR(cosa_class)) {
399 err = PTR_ERR(cosa_class);
400 goto out_chrdev;
401 }
402 for (i=0; i<nr_cards; i++) {
403 class_simple_device_add(cosa_class, MKDEV(cosa_major, i),
404 NULL, "cosa%d", i);
405 err = devfs_mk_cdev(MKDEV(cosa_major, i),
406 S_IFCHR|S_IRUSR|S_IWUSR,
407 "cosa/%d", i);
408 if (err) {
409 class_simple_device_remove(MKDEV(cosa_major, i));
410 goto out_chrdev;
411 }
412 }
413 err = 0;
414 goto out;
415
416out_chrdev:
417 unregister_chrdev(cosa_major, "cosa");
418out:
419 return err;
420}
421module_init(cosa_init);
422
423static void __exit cosa_exit(void)
424{
425 struct cosa_data *cosa;
426 int i;
427 printk(KERN_INFO "Unloading the cosa module\n");
428
429 for (i=0; i<nr_cards; i++) {
430 class_simple_device_remove(MKDEV(cosa_major, i));
431 devfs_remove("cosa/%d", i);
432 }
433 class_simple_destroy(cosa_class);
434 devfs_remove("cosa");
435 for (cosa=cosa_cards; nr_cards--; cosa++) {
436 /* Clean up the per-channel data */
437 for (i=0; i<cosa->nchannels; i++) {
438 /* Chardev driver has no alloc'd per-channel data */
439 sppp_channel_delete(cosa->chan+i);
440 }
441 /* Clean up the per-card data */
442 kfree(cosa->chan);
443 kfree(cosa->bouncebuf);
444 free_irq(cosa->irq, cosa);
445 free_dma(cosa->dma);
446 release_region(cosa->datareg,is_8bit(cosa)?2:4);
447 }
448 unregister_chrdev(cosa_major, "cosa");
449}
450module_exit(cosa_exit);
451
452/*
453 * This function should register all the net devices needed for the
454 * single channel.
455 */
456static __inline__ void channel_init(struct channel_data *chan)
457{
458 sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num);
459
460 /* Initialize the chardev data structures */
461 chardev_channel_init(chan);
462
463 /* Register the sppp interface */
464 sppp_channel_init(chan);
465}
466
467static int cosa_probe(int base, int irq, int dma)
468{
469 struct cosa_data *cosa = cosa_cards+nr_cards;
470 int i, err = 0;
471
472 memset(cosa, 0, sizeof(struct cosa_data));
473
474 /* Checking validity of parameters: */
475 /* IRQ should be 2-7 or 10-15; negative IRQ means autoprobe */
476 if ((irq >= 0 && irq < 2) || irq > 15 || (irq < 10 && irq > 7)) {
477 printk (KERN_INFO "cosa_probe: invalid IRQ %d\n", irq);
478 return -1;
479 }
480 /* I/O address should be between 0x100 and 0x3ff and should be
481 * multiple of 8. */
482 if (base < 0x100 || base > 0x3ff || base & 0x7) {
483 printk (KERN_INFO "cosa_probe: invalid I/O address 0x%x\n",
484 base);
485 return -1;
486 }
487	/* DMA should be 0-3 or 5-7 (channel 4 is the ISA cascade) */
488 if (dma < 0 || dma == 4 || dma > 7) {
489 printk (KERN_INFO "cosa_probe: invalid DMA %d\n", dma);
490 return -1;
491 }
492	/* and finally, a 16-bit COSA (I/O base not a multiple of 0x10)
493	 * needs DMA 5-7, while an 8-bit one needs DMA 0-3 */
494 if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
495 printk (KERN_INFO "cosa_probe: 8/16 bit base and DMA mismatch"
496 " (base=0x%x, dma=%d)\n", base, dma);
497 return -1;
498 }
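	/*
	 * Editorial example (an addition, assuming the usual io=/irq=/dma=
	 * module parameters): an 8-bit card at io=0x220 (bit 3 clear) needs
	 * an 8-bit DMA channel such as dma=1, while a 16-bit card at
	 * io=0x228 (bit 3 set) needs a 16-bit channel such as dma=5;
	 * irq=-1 requests the IRQ autoprobe performed below.
	 */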
499
500 cosa->dma = dma;
501 cosa->datareg = base;
502 cosa->statusreg = is_8bit(cosa)?base+1:base+2;
503 spin_lock_init(&cosa->lock);
504
505 if (!request_region(base, is_8bit(cosa)?2:4,"cosa"))
506 return -1;
507
508 if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
509 printk(KERN_DEBUG "cosa: probe at 0x%x failed.\n", base);
510 err = -1;
511 goto err_out;
512 }
513
514 /* Test the validity of identification string */
515 if (!strncmp(cosa->id_string, "SRP", 3))
516 cosa->type = "srp";
517 else if (!strncmp(cosa->id_string, "COSA", 4))
518 cosa->type = is_8bit(cosa)? "cosa8": "cosa16";
519 else {
520/* Print a warning only if we are not autoprobing */
521#ifndef COSA_ISA_AUTOPROBE
522 printk(KERN_INFO "cosa: valid signature not found at 0x%x.\n",
523 base);
524#endif
525 err = -1;
526 goto err_out;
527 }
528 /* Update the name of the region now we know the type of card */
529 release_region(base, is_8bit(cosa)?2:4);
530 if (!request_region(base, is_8bit(cosa)?2:4, cosa->type)) {
531 printk(KERN_DEBUG "cosa: changing name at 0x%x failed.\n", base);
532 return -1;
533 }
534
535 /* Now do IRQ autoprobe */
536 if (irq < 0) {
537 unsigned long irqs;
538/* printk(KERN_INFO "IRQ autoprobe\n"); */
539 irqs = probe_irq_on();
540 /*
541		 * Enable the interrupt on tx buffer empty (it surely is
542		 * empty at this point - or is it?).
543		 * FIXME: When this code is not built as a module, we should
544		 * probably call udelay() instead of the interruptible sleep.
545 */
546 set_current_state(TASK_INTERRUPTIBLE);
547 cosa_putstatus(cosa, SR_TX_INT_ENA);
548 schedule_timeout(30);
549 irq = probe_irq_off(irqs);
550 /* Disable all IRQs from the card */
551 cosa_putstatus(cosa, 0);
552 /* Empty the received data register */
553 cosa_getdata8(cosa);
554
555 if (irq < 0) {
556 printk (KERN_INFO "cosa IRQ autoprobe: multiple interrupts obtained (%d, board at 0x%x)\n",
557 irq, cosa->datareg);
558 err = -1;
559 goto err_out;
560 }
561 if (irq == 0) {
562 printk (KERN_INFO "cosa IRQ autoprobe: no interrupt obtained (board at 0x%x)\n",
563 cosa->datareg);
564 /* return -1; */
565 }
566 }
567
568 cosa->irq = irq;
569 cosa->num = nr_cards;
570 cosa->usage = 0;
571 cosa->nchannels = 2; /* FIXME: how to determine this? */
572
573 if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
574 err = -1;
575 goto err_out;
576 }
577 if (request_dma(cosa->dma, cosa->type)) {
578 err = -1;
579 goto err_out1;
580 }
581
582 cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL|GFP_DMA);
583 if (!cosa->bouncebuf) {
584 err = -ENOMEM;
585 goto err_out2;
586 }
587 sprintf(cosa->name, "cosa%d", cosa->num);
588
589 /* Initialize the per-channel data */
590 cosa->chan = kmalloc(sizeof(struct channel_data)*cosa->nchannels,
591 GFP_KERNEL);
592 if (!cosa->chan) {
593 err = -ENOMEM;
594 goto err_out3;
595 }
596 memset(cosa->chan, 0, sizeof(struct channel_data)*cosa->nchannels);
597 for (i=0; i<cosa->nchannels; i++) {
598 cosa->chan[i].cosa = cosa;
599 cosa->chan[i].num = i;
600 channel_init(cosa->chan+i);
601 }
602
603 printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n",
604 cosa->num, cosa->id_string, cosa->type,
605 cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels);
606
607 return nr_cards++;
608err_out3:
609 kfree(cosa->bouncebuf);
610err_out2:
611 free_dma(cosa->dma);
612err_out1:
613 free_irq(cosa->irq, cosa);
614err_out:
615 release_region(cosa->datareg,is_8bit(cosa)?2:4);
616 printk(KERN_NOTICE "cosa%d: allocating resources failed\n",
617 cosa->num);
618 return err;
619}
620
621
622/*---------- SPPP/HDLC netdevice ---------- */
623
624static void cosa_setup(struct net_device *d)
625{
626 d->open = cosa_sppp_open;
627 d->stop = cosa_sppp_close;
628 d->hard_start_xmit = cosa_sppp_tx;
629 d->do_ioctl = cosa_sppp_ioctl;
630 d->get_stats = cosa_net_stats;
631 d->tx_timeout = cosa_sppp_timeout;
632 d->watchdog_timeo = TX_TIMEOUT;
633}
634
635static void sppp_channel_init(struct channel_data *chan)
636{
637 struct net_device *d;
638 chan->if_ptr = &chan->pppdev;
639 d = alloc_netdev(0, chan->name, cosa_setup);
640 if (!d) {
641 printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name);
642 return;
643 }
644 chan->pppdev.dev = d;
645 d->base_addr = chan->cosa->datareg;
646 d->irq = chan->cosa->irq;
647 d->dma = chan->cosa->dma;
648 d->priv = chan;
649 sppp_attach(&chan->pppdev);
650 if (register_netdev(d)) {
651 printk(KERN_WARNING "%s: register_netdev failed.\n", d->name);
652 sppp_detach(d);
653 free_netdev(d);
654 chan->pppdev.dev = NULL;
655 return;
656 }
657}
658
659static void sppp_channel_delete(struct channel_data *chan)
660{
661 unregister_netdev(chan->pppdev.dev);
662 sppp_detach(chan->pppdev.dev);
663 free_netdev(chan->pppdev.dev);
664 chan->pppdev.dev = NULL;
665}
666
667static int cosa_sppp_open(struct net_device *d)
668{
669 struct channel_data *chan = d->priv;
670 int err;
671 unsigned long flags;
672
673 if (!(chan->cosa->firmware_status & COSA_FW_START)) {
674 printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
675 chan->cosa->name, chan->cosa->firmware_status);
676 return -EPERM;
677 }
678 spin_lock_irqsave(&chan->cosa->lock, flags);
679 if (chan->usage != 0) {
680 printk(KERN_WARNING "%s: sppp_open called with usage count %d\n",
681 chan->name, chan->usage);
682 spin_unlock_irqrestore(&chan->cosa->lock, flags);
683 return -EBUSY;
684 }
685 chan->setup_rx = sppp_setup_rx;
686 chan->tx_done = sppp_tx_done;
687 chan->rx_done = sppp_rx_done;
688 chan->usage=-1;
689 chan->cosa->usage++;
690 spin_unlock_irqrestore(&chan->cosa->lock, flags);
691
692 err = sppp_open(d);
693 if (err) {
694 spin_lock_irqsave(&chan->cosa->lock, flags);
695 chan->usage=0;
696 chan->cosa->usage--;
697
698 spin_unlock_irqrestore(&chan->cosa->lock, flags);
699 return err;
700 }
701
702 netif_start_queue(d);
703 cosa_enable_rx(chan);
704 return 0;
705}
706
707static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev)
708{
709 struct channel_data *chan = dev->priv;
710
711 netif_stop_queue(dev);
712
713 chan->tx_skb = skb;
714 cosa_start_tx(chan, skb->data, skb->len);
715 return 0;
716}
717
718static void cosa_sppp_timeout(struct net_device *dev)
719{
720 struct channel_data *chan = dev->priv;
721
722 if (test_bit(RXBIT, &chan->cosa->rxtx)) {
723 chan->stats.rx_errors++;
724 chan->stats.rx_missed_errors++;
725 } else {
726 chan->stats.tx_errors++;
727 chan->stats.tx_aborted_errors++;
728 }
729 cosa_kick(chan->cosa);
730 if (chan->tx_skb) {
731 dev_kfree_skb(chan->tx_skb);
732 chan->tx_skb = NULL;
733 }
734 netif_wake_queue(dev);
735}
736
737static int cosa_sppp_close(struct net_device *d)
738{
739 struct channel_data *chan = d->priv;
740 unsigned long flags;
741
742 netif_stop_queue(d);
743 sppp_close(d);
744 cosa_disable_rx(chan);
745 spin_lock_irqsave(&chan->cosa->lock, flags);
746 if (chan->rx_skb) {
747 kfree_skb(chan->rx_skb);
748 chan->rx_skb = NULL;
749 }
750 if (chan->tx_skb) {
751 kfree_skb(chan->tx_skb);
752 chan->tx_skb = NULL;
753 }
754 chan->usage=0;
755 chan->cosa->usage--;
756 spin_unlock_irqrestore(&chan->cosa->lock, flags);
757 return 0;
758}
759
760static char *sppp_setup_rx(struct channel_data *chan, int size)
761{
762 /*
763 * We can safely fall back to non-dma-able memory, because we have
764 * the cosa->bouncebuf pre-allocated.
765 */
766 if (chan->rx_skb)
767 kfree_skb(chan->rx_skb);
768 chan->rx_skb = dev_alloc_skb(size);
769 if (chan->rx_skb == NULL) {
770 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n",
771 chan->name);
772 chan->stats.rx_dropped++;
773 return NULL;
774 }
775 chan->pppdev.dev->trans_start = jiffies;
776 return skb_put(chan->rx_skb, size);
777}
778
779static int sppp_rx_done(struct channel_data *chan)
780{
781 if (!chan->rx_skb) {
782 printk(KERN_WARNING "%s: rx_done with empty skb!\n",
783 chan->name);
784 chan->stats.rx_errors++;
785 chan->stats.rx_frame_errors++;
786 return 0;
787 }
788 chan->rx_skb->protocol = htons(ETH_P_WAN_PPP);
789 chan->rx_skb->dev = chan->pppdev.dev;
790 chan->rx_skb->mac.raw = chan->rx_skb->data;
791 chan->stats.rx_packets++;
792 chan->stats.rx_bytes += chan->cosa->rxsize;
793 netif_rx(chan->rx_skb);
794 chan->rx_skb = NULL;
795 chan->pppdev.dev->last_rx = jiffies;
796 return 0;
797}
798
799/* ARGSUSED */
800static int sppp_tx_done(struct channel_data *chan, int size)
801{
802 if (!chan->tx_skb) {
803 printk(KERN_WARNING "%s: tx_done with empty skb!\n",
804 chan->name);
805 chan->stats.tx_errors++;
806 chan->stats.tx_aborted_errors++;
807 return 1;
808 }
809 dev_kfree_skb_irq(chan->tx_skb);
810 chan->tx_skb = NULL;
811 chan->stats.tx_packets++;
812 chan->stats.tx_bytes += size;
813 netif_wake_queue(chan->pppdev.dev);
814 return 1;
815}
816
817static struct net_device_stats *cosa_net_stats(struct net_device *dev)
818{
819 struct channel_data *chan = dev->priv;
820 return &chan->stats;
821}
822
823
824/*---------- Character device ---------- */
825
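/*
 * Editorial sketch (an addition, not part of the original source): from
 * user space the character device is frame oriented - once the firmware
 * has been started, each write() queues one frame of at most COSA_MTU
 * bytes and each read() blocks until one received frame arrives.  With
 * the devfs node created in cosa_init() (minor 0 = card 0, channel 0,
 * assumed to appear as /dev/cosa/0), a minimal user might do:
 *
 *	int fd = open("/dev/cosa/0", O_RDWR);
 *	write(fd, frame, frame_len);		(send one frame)
 *	n = read(fd, buf, sizeof(buf));		(returns one received frame)
 */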
826static void chardev_channel_init(struct channel_data *chan)
827{
828 init_MUTEX(&chan->rsem);
829 init_MUTEX(&chan->wsem);
830}
831
832static ssize_t cosa_read(struct file *file,
833 char __user *buf, size_t count, loff_t *ppos)
834{
835 DECLARE_WAITQUEUE(wait, current);
836 unsigned long flags;
837 struct channel_data *chan = file->private_data;
838 struct cosa_data *cosa = chan->cosa;
839 char *kbuf;
840
841 if (!(cosa->firmware_status & COSA_FW_START)) {
842 printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
843 cosa->name, cosa->firmware_status);
844 return -EPERM;
845 }
846 if (down_interruptible(&chan->rsem))
847 return -ERESTARTSYS;
848
849 if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) {
850 printk(KERN_INFO "%s: cosa_read() - OOM\n", cosa->name);
851 up(&chan->rsem);
852 return -ENOMEM;
853 }
854
855 chan->rx_status = 0;
856 cosa_enable_rx(chan);
857 spin_lock_irqsave(&cosa->lock, flags);
858 add_wait_queue(&chan->rxwaitq, &wait);
859 while(!chan->rx_status) {
860 current->state = TASK_INTERRUPTIBLE;
861 spin_unlock_irqrestore(&cosa->lock, flags);
862 schedule();
863 spin_lock_irqsave(&cosa->lock, flags);
864 if (signal_pending(current) && chan->rx_status == 0) {
865 chan->rx_status = 1;
866 remove_wait_queue(&chan->rxwaitq, &wait);
867 current->state = TASK_RUNNING;
868 spin_unlock_irqrestore(&cosa->lock, flags);
869 up(&chan->rsem);
870 return -ERESTARTSYS;
871 }
872 }
873 remove_wait_queue(&chan->rxwaitq, &wait);
874 current->state = TASK_RUNNING;
875 kbuf = chan->rxdata;
876 count = chan->rxsize;
877 spin_unlock_irqrestore(&cosa->lock, flags);
878 up(&chan->rsem);
879
880 if (copy_to_user(buf, kbuf, count)) {
881 kfree(kbuf);
882 return -EFAULT;
883 }
884 kfree(kbuf);
885 return count;
886}
887
888static char *chrdev_setup_rx(struct channel_data *chan, int size)
889{
890 /* Expect size <= COSA_MTU */
891 chan->rxsize = size;
892 return chan->rxdata;
893}
894
895static int chrdev_rx_done(struct channel_data *chan)
896{
897 if (chan->rx_status) { /* Reader has died */
898 kfree(chan->rxdata);
899 up(&chan->wsem);
900 }
901 chan->rx_status = 1;
902 wake_up_interruptible(&chan->rxwaitq);
903 return 1;
904}
905
906
907static ssize_t cosa_write(struct file *file,
908 const char __user *buf, size_t count, loff_t *ppos)
909{
910 DECLARE_WAITQUEUE(wait, current);
911 struct channel_data *chan = file->private_data;
912 struct cosa_data *cosa = chan->cosa;
913 unsigned long flags;
914 char *kbuf;
915
916 if (!(cosa->firmware_status & COSA_FW_START)) {
917 printk(KERN_NOTICE "%s: start the firmware first (status %d)\n",
918 cosa->name, cosa->firmware_status);
919 return -EPERM;
920 }
921 if (down_interruptible(&chan->wsem))
922 return -ERESTARTSYS;
923
924 if (count > COSA_MTU)
925 count = COSA_MTU;
926
927 /* Allocate the buffer */
928 if ((kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA)) == NULL) {
929 printk(KERN_NOTICE "%s: cosa_write() OOM - dropping packet\n",
930 cosa->name);
931 up(&chan->wsem);
932 return -ENOMEM;
933 }
934 if (copy_from_user(kbuf, buf, count)) {
935 up(&chan->wsem);
936 kfree(kbuf);
937 return -EFAULT;
938 }
939 chan->tx_status=0;
940 cosa_start_tx(chan, kbuf, count);
941
942 spin_lock_irqsave(&cosa->lock, flags);
943 add_wait_queue(&chan->txwaitq, &wait);
944 while(!chan->tx_status) {
945 current->state = TASK_INTERRUPTIBLE;
946 spin_unlock_irqrestore(&cosa->lock, flags);
947 schedule();
948 spin_lock_irqsave(&cosa->lock, flags);
949 if (signal_pending(current) && chan->tx_status == 0) {
950 chan->tx_status = 1;
951 remove_wait_queue(&chan->txwaitq, &wait);
952 current->state = TASK_RUNNING;
953 chan->tx_status = 1;
954 spin_unlock_irqrestore(&cosa->lock, flags);
955 return -ERESTARTSYS;
956 }
957 }
958 remove_wait_queue(&chan->txwaitq, &wait);
959 current->state = TASK_RUNNING;
960 up(&chan->wsem);
961 spin_unlock_irqrestore(&cosa->lock, flags);
962 kfree(kbuf);
963 return count;
964}
965
966static int chrdev_tx_done(struct channel_data *chan, int size)
967{
968 if (chan->tx_status) { /* Writer was interrupted */
969 kfree(chan->txbuf);
970 up(&chan->wsem);
971 }
972 chan->tx_status = 1;
973 wake_up_interruptible(&chan->txwaitq);
974 return 1;
975}
976
977static unsigned int cosa_poll(struct file *file, poll_table *poll)
978{
979 printk(KERN_INFO "cosa_poll is here\n");
980 return 0;
981}
982
983static int cosa_open(struct inode *inode, struct file *file)
984{
985 struct cosa_data *cosa;
986 struct channel_data *chan;
987 unsigned long flags;
988 int n;
989
990 if ((n=iminor(file->f_dentry->d_inode)>>CARD_MINOR_BITS)
991 >= nr_cards)
992 return -ENODEV;
993 cosa = cosa_cards+n;
994
995 if ((n=iminor(file->f_dentry->d_inode)
996 & ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels)
997 return -ENODEV;
998 chan = cosa->chan + n;
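	/*
	 * Editorial note: the minor number encodes both the card and the
	 * channel - for illustration, if CARD_MINOR_BITS were 4, minor
	 * 0x12 would select card 1, channel 2.
	 */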
999
1000 file->private_data = chan;
1001
1002 spin_lock_irqsave(&cosa->lock, flags);
1003
1004 if (chan->usage < 0) { /* in netdev mode */
1005 spin_unlock_irqrestore(&cosa->lock, flags);
1006 return -EBUSY;
1007 }
1008 cosa->usage++;
1009 chan->usage++;
1010
1011 chan->tx_done = chrdev_tx_done;
1012 chan->setup_rx = chrdev_setup_rx;
1013 chan->rx_done = chrdev_rx_done;
1014 spin_unlock_irqrestore(&cosa->lock, flags);
1015 return 0;
1016}
1017
1018static int cosa_release(struct inode *inode, struct file *file)
1019{
1020 struct channel_data *channel = file->private_data;
1021 struct cosa_data *cosa;
1022 unsigned long flags;
1023
1024 cosa = channel->cosa;
1025 spin_lock_irqsave(&cosa->lock, flags);
1026 cosa->usage--;
1027 channel->usage--;
1028 spin_unlock_irqrestore(&cosa->lock, flags);
1029 return 0;
1030}
1031
1032#ifdef COSA_FASYNC_WORKING
1033static struct fasync_struct *fasync[256] = { NULL, };
1034
1035/* To be done ... */
1036static int cosa_fasync(struct inode *inode, struct file *file, int on)
1037{
1038 int port = iminor(inode);
1039 int rv = fasync_helper(inode, file, on, &fasync[port]);
1040 return rv < 0 ? rv : 0;
1041}
1042#endif
1043
1044
1045/* ---------- Ioctls ---------- */
1046
1047/*
1048 * Ioctl subroutines can safely be made inline, because they are called
1049 * only from cosa_ioctl_common().
1050 */
1051static inline int cosa_reset(struct cosa_data *cosa)
1052{
1053 char idstring[COSA_MAX_ID_STRING];
1054 if (cosa->usage > 1)
1055 printk(KERN_INFO "cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
1056 cosa->num, cosa->usage);
1057 cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_START);
1058 if (cosa_reset_and_read_id(cosa, idstring) < 0) {
1059 printk(KERN_NOTICE "cosa%d: reset failed\n", cosa->num);
1060 return -EIO;
1061 }
1062 printk(KERN_INFO "cosa%d: resetting device: %s\n", cosa->num,
1063 idstring);
1064 cosa->firmware_status |= COSA_FW_RESET;
1065 return 0;
1066}
1067
1068/* High-level function to download data into COSA memory. Calls download() */
1069static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
1070{
1071 struct cosa_download d;
1072 int i;
1073
1074 if (cosa->usage > 1)
1075 printk(KERN_INFO "%s: WARNING: download of microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
1076 cosa->name, cosa->usage);
1077 if (!(cosa->firmware_status & COSA_FW_RESET)) {
1078 printk(KERN_NOTICE "%s: reset the card first (status %d).\n",
1079 cosa->name, cosa->firmware_status);
1080 return -EPERM;
1081 }
1082
1083 if (copy_from_user(&d, arg, sizeof(d)))
1084 return -EFAULT;
1085
1086 if (d.addr < 0 || d.addr > COSA_MAX_FIRMWARE_SIZE)
1087 return -EINVAL;
1088 if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
1089 return -EINVAL;
1090
1091
1092 /* If something fails, force the user to reset the card */
1093 cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_DOWNLOAD);
1094
1095 i = download(cosa, d.code, d.len, d.addr);
1096 if (i < 0) {
1097 printk(KERN_NOTICE "cosa%d: microcode download failed: %d\n",
1098 cosa->num, i);
1099 return -EIO;
1100 }
1101 printk(KERN_INFO "cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
1102 cosa->num, d.len, d.addr);
1103 cosa->firmware_status |= COSA_FW_RESET|COSA_FW_DOWNLOAD;
1104 return 0;
1105}
1106
1107/* High-level function to read COSA memory. Calls readmem() */
1108static inline int cosa_readmem(struct cosa_data *cosa, void __user *arg)
1109{
1110 struct cosa_download d;
1111 int i;
1112
1113 if (cosa->usage > 1)
1114 printk(KERN_INFO "cosa%d: WARNING: readmem requested with "
1115 "cosa->usage > 1 (%d). Odd things may happen.\n",
1116 cosa->num, cosa->usage);
1117 if (!(cosa->firmware_status & COSA_FW_RESET)) {
1118 printk(KERN_NOTICE "%s: reset the card first (status %d).\n",
1119 cosa->name, cosa->firmware_status);
1120 return -EPERM;
1121 }
1122
1123 if (copy_from_user(&d, arg, sizeof(d)))
1124 return -EFAULT;
1125
1126 /* If something fails, force the user to reset the card */
1127 cosa->firmware_status &= ~COSA_FW_RESET;
1128
1129 i = readmem(cosa, d.code, d.len, d.addr);
1130 if (i < 0) {
1131 printk(KERN_NOTICE "cosa%d: reading memory failed: %d\n",
1132 cosa->num, i);
1133 return -EIO;
1134 }
1135 printk(KERN_INFO "cosa%d: reading card memory - 0x%04x bytes at 0x%04x\n",
1136 cosa->num, d.len, d.addr);
1137 cosa->firmware_status |= COSA_FW_RESET;
1138 return 0;
1139}
1140
1141/* High-level function to start microcode. Calls startmicrocode(). */
1142static inline int cosa_start(struct cosa_data *cosa, int address)
1143{
1144 int i;
1145
1146 if (cosa->usage > 1)
1147 printk(KERN_INFO "cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
1148 cosa->num, cosa->usage);
1149
1150 if ((cosa->firmware_status & (COSA_FW_RESET|COSA_FW_DOWNLOAD))
1151 != (COSA_FW_RESET|COSA_FW_DOWNLOAD)) {
1152 printk(KERN_NOTICE "%s: download the microcode and/or reset the card first (status %d).\n",
1153 cosa->name, cosa->firmware_status);
1154 return -EPERM;
1155 }
1156 cosa->firmware_status &= ~COSA_FW_RESET;
1157 if ((i=startmicrocode(cosa, address)) < 0) {
1158 printk(KERN_NOTICE "cosa%d: start microcode at 0x%04x failed: %d\n",
1159 cosa->num, address, i);
1160 return -EIO;
1161 }
1162 printk(KERN_INFO "cosa%d: starting microcode at 0x%04x\n",
1163 cosa->num, address);
1164 cosa->startaddr = address;
1165 cosa->firmware_status |= COSA_FW_START;
1166 return 0;
1167}
1168
1169/* Buffer of size at least COSA_MAX_ID_STRING is expected */
1170static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
1171{
1172 int l = strlen(cosa->id_string)+1;
1173 if (copy_to_user(string, cosa->id_string, l))
1174 return -EFAULT;
1175 return l;
1176}
1177
1178/* Buffer of size at least COSA_MAX_ID_STRING is expected */
1179static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
1180{
1181 int l = strlen(cosa->type)+1;
1182 if (copy_to_user(string, cosa->type, l))
1183 return -EFAULT;
1184 return l;
1185}
1186
1187static int cosa_ioctl_common(struct cosa_data *cosa,
1188 struct channel_data *channel, unsigned int cmd, unsigned long arg)
1189{
1190 void __user *argp = (void __user *)arg;
1191 switch(cmd) {
1192 case COSAIORSET: /* Reset the device */
1193 if (!capable(CAP_NET_ADMIN))
1194 return -EACCES;
1195 return cosa_reset(cosa);
1196 case COSAIOSTRT: /* Start the firmware */
1197 if (!capable(CAP_SYS_RAWIO))
1198 return -EACCES;
1199 return cosa_start(cosa, arg);
1200 case COSAIODOWNLD: /* Download the firmware */
1201 if (!capable(CAP_SYS_RAWIO))
1202 return -EACCES;
1203
1204 return cosa_download(cosa, argp);
1205 case COSAIORMEM:
1206 if (!capable(CAP_SYS_RAWIO))
1207 return -EACCES;
1208 return cosa_readmem(cosa, argp);
1209 case COSAIORTYPE:
1210 return cosa_gettype(cosa, argp);
1211 case COSAIORIDSTR:
1212 return cosa_getidstr(cosa, argp);
1213 case COSAIONRCARDS:
1214 return nr_cards;
1215 case COSAIONRCHANS:
1216 return cosa->nchannels;
1217 case COSAIOBMSET:
1218 if (!capable(CAP_SYS_RAWIO))
1219 return -EACCES;
1220 if (is_8bit(cosa))
1221 return -EINVAL;
1222 if (arg != COSA_BM_OFF && arg != COSA_BM_ON)
1223 return -EINVAL;
1224 cosa->busmaster = arg;
1225 return 0;
1226 case COSAIOBMGET:
1227 return cosa->busmaster;
1228 }
1229 return -ENOIOCTLCMD;
1230}
1231
1232static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr,
1233 int cmd)
1234{
1235 int rv;
1236 struct channel_data *chan = dev->priv;
1237 rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data);
1238 if (rv == -ENOIOCTLCMD) {
1239 return sppp_do_ioctl(dev, ifr, cmd);
1240 }
1241 return rv;
1242}
1243
1244static int cosa_chardev_ioctl(struct inode *inode, struct file *file,
1245 unsigned int cmd, unsigned long arg)
1246{
1247 struct channel_data *channel = file->private_data;
1248 struct cosa_data *cosa = channel->cosa;
1249 return cosa_ioctl_common(cosa, channel, cmd, arg);
1250}
1251
1252
1253/*---------- HW layer interface ---------- */
1254
1255/*
1256 * The higher layer can bind itself to the HW layer by setting the callbacks
1257 * in the channel_data structure and by using these routines.
1258 */
1259static void cosa_enable_rx(struct channel_data *chan)
1260{
1261 struct cosa_data *cosa = chan->cosa;
1262
1263 if (!test_and_set_bit(chan->num, &cosa->rxbitmap))
1264 put_driver_status(cosa);
1265}
1266
1267static void cosa_disable_rx(struct channel_data *chan)
1268{
1269 struct cosa_data *cosa = chan->cosa;
1270
1271 if (test_and_clear_bit(chan->num, &cosa->rxbitmap))
1272 put_driver_status(cosa);
1273}
1274
1275/*
1276 * FIXME: This routine should probably check whether cosa_start_tx() is
1277 * called while the previous transmit is still unfinished. In that case
1278 * a non-zero return value should indicate to the caller that queueing
1279 * up the transmit has failed.
1280 */
1281static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
1282{
1283 struct cosa_data *cosa = chan->cosa;
1284 unsigned long flags;
1285#ifdef DEBUG_DATA
1286 int i;
1287
1288 printk(KERN_INFO "cosa%dc%d: starting tx(0x%x)", chan->cosa->num,
1289 chan->num, len);
1290 for (i=0; i<len; i++)
1291 printk(" %02x", buf[i]&0xff);
1292 printk("\n");
1293#endif
1294 spin_lock_irqsave(&cosa->lock, flags);
1295 chan->txbuf = buf;
1296 chan->txsize = len;
1297 if (len > COSA_MTU)
1298 chan->txsize = COSA_MTU;
1299 spin_unlock_irqrestore(&cosa->lock, flags);
1300
1301 /* Tell the firmware we are ready */
1302 set_bit(chan->num, &cosa->txbitmap);
1303 put_driver_status(cosa);
1304
1305 return 0;
1306}
1307
1308static void put_driver_status(struct cosa_data *cosa)
1309{
1310 unsigned long flags;
1311 int status;
1312
1313 spin_lock_irqsave(&cosa->lock, flags);
1314
1315 status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
1316 | (cosa->txbitmap ? DRIVER_TX_READY : 0)
1317 | (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
1318 &DRIVER_TXMAP_MASK : 0);
1319 if (!cosa->rxtx) {
1320 if (cosa->rxbitmap|cosa->txbitmap) {
1321 if (!cosa->enabled) {
1322 cosa_putstatus(cosa, SR_RX_INT_ENA);
1323#ifdef DEBUG_IO
1324 debug_status_out(cosa, SR_RX_INT_ENA);
1325#endif
1326 cosa->enabled = 1;
1327 }
1328 } else if (cosa->enabled) {
1329 cosa->enabled = 0;
1330 cosa_putstatus(cosa, 0);
1331#ifdef DEBUG_IO
1332 debug_status_out(cosa, 0);
1333#endif
1334 }
1335 cosa_putdata8(cosa, status);
1336#ifdef DEBUG_IO
1337 debug_data_cmd(cosa, status);
1338#endif
1339 }
1340 spin_unlock_irqrestore(&cosa->lock, flags);
1341}
1342
1343static void put_driver_status_nolock(struct cosa_data *cosa)
1344{
1345 int status;
1346
1347 status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
1348 | (cosa->txbitmap ? DRIVER_TX_READY : 0)
1349 | (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
1350 &DRIVER_TXMAP_MASK : 0);
1351
1352 if (cosa->rxbitmap|cosa->txbitmap) {
1353 cosa_putstatus(cosa, SR_RX_INT_ENA);
1354#ifdef DEBUG_IO
1355 debug_status_out(cosa, SR_RX_INT_ENA);
1356#endif
1357 cosa->enabled = 1;
1358 } else {
1359 cosa_putstatus(cosa, 0);
1360#ifdef DEBUG_IO
1361 debug_status_out(cosa, 0);
1362#endif
1363 cosa->enabled = 0;
1364 }
1365 cosa_putdata8(cosa, status);
1366#ifdef DEBUG_IO
1367 debug_data_cmd(cosa, status);
1368#endif
1369}
1370
1371/*
1372 * The "kickme" function: When the DMA times out, this is called to
1373 * clean up the driver status.
1374 * FIXME: Preliminary support, the interface is probably wrong.
1375 */
1376static void cosa_kick(struct cosa_data *cosa)
1377{
1378 unsigned long flags, flags1;
1379 char *s = "(probably) IRQ";
1380
1381 if (test_bit(RXBIT, &cosa->rxtx))
1382 s = "RX DMA";
1383 if (test_bit(TXBIT, &cosa->rxtx))
1384 s = "TX DMA";
1385
1386 printk(KERN_INFO "%s: %s timeout - restarting.\n", cosa->name, s);
1387 spin_lock_irqsave(&cosa->lock, flags);
1388 cosa->rxtx = 0;
1389
1390 flags1 = claim_dma_lock();
1391 disable_dma(cosa->dma);
1392 clear_dma_ff(cosa->dma);
1393 release_dma_lock(flags1);
1394
1395 /* FIXME: Anything else? */
1396 udelay(100);
1397 cosa_putstatus(cosa, 0);
1398 udelay(100);
1399 (void) cosa_getdata8(cosa);
1400 udelay(100);
1401 cosa_putdata8(cosa, 0);
1402 udelay(100);
1403 put_driver_status_nolock(cosa);
1404 spin_unlock_irqrestore(&cosa->lock, flags);
1405}
1406
1407/*
1408 * Check if the whole buffer is DMA-able, i.e. it lies below 16M of
1409 * physical memory and doesn't span a 64k boundary. For now it seems
1410 * SKBs never violate this, but we'll check anyway.
1411 */
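/*
 * Editorial example: a 32-byte buffer starting at 0x0ffff0 extends to
 * 0x100010, so b ^ (b+len) has bit 16 set and the buffer spans a 64k
 * boundary; cosa_dma_able() then returns 0 and the caller falls back to
 * the bounce buffer.
 */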
1412static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
1413{
1414 static int count;
1415 unsigned long b = (unsigned long)buf;
1416 if (b+len >= MAX_DMA_ADDRESS)
1417 return 0;
1418 if ((b^ (b+len)) & 0x10000) {
1419 if (count++ < 5)
1420 printk(KERN_INFO "%s: packet spanning a 64k boundary\n",
1421 chan->name);
1422 return 0;
1423 }
1424 return 1;
1425}
1426
1427
1428/* ---------- The SRP/COSA ROM monitor functions ---------- */
1429
1430/*
1431 * Downloading SRP microcode: send "w" to the SRP monitor, which answers
1432 * with "w=". The driver then writes the 4-digit hex start address of the
1433 * microcode followed by a single space, and the monitor replies with " =".
1434 * Next the driver writes the 4-digit hex address of the last byte, again
1435 * followed by a single space, and the monitor replies with a space. The
1436 * download then begins; afterwards the monitor replies with "\r\n." (CR LF dot).
1437 */
1438static int download(struct cosa_data *cosa, const char __user *microcode, int length, int address)
1439{
1440 int i;
1441
1442 if (put_wait_data(cosa, 'w') == -1) return -1;
1443 if ((i=get_wait_data(cosa)) != 'w') { printk("dnld: 0x%04x\n",i); return -2;}
1444 if (get_wait_data(cosa) != '=') return -3;
1445
1446 if (puthexnumber(cosa, address) < 0) return -4;
1447 if (put_wait_data(cosa, ' ') == -1) return -10;
1448 if (get_wait_data(cosa) != ' ') return -11;
1449 if (get_wait_data(cosa) != '=') return -12;
1450
1451 if (puthexnumber(cosa, address+length-1) < 0) return -13;
1452 if (put_wait_data(cosa, ' ') == -1) return -18;
1453 if (get_wait_data(cosa) != ' ') return -19;
1454
1455 while (length--) {
1456 char c;
1457#ifndef SRP_DOWNLOAD_AT_BOOT
1458 if (get_user(c, microcode))
1459 return -23; /* ??? */
1460#else
1461 c = *microcode;
1462#endif
1463 if (put_wait_data(cosa, c) == -1)
1464 return -20;
1465 microcode++;
1466 }
1467
1468 if (get_wait_data(cosa) != '\r') return -21;
1469 if (get_wait_data(cosa) != '\n') return -22;
1470 if (get_wait_data(cosa) != '.') return -23;
1471#if 0
1472 printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
1473#endif
1474 return 0;
1475}
1476
1477
1478/*
1479 * Starting microcode is done via the "g" command of the SRP monitor.
1480 * The chat should be the following: "g" "g=" "<addr><CR>"
1481 * "<CR><CR><LF><CR><LF>".
1482 */
1483static int startmicrocode(struct cosa_data *cosa, int address)
1484{
1485 if (put_wait_data(cosa, 'g') == -1) return -1;
1486 if (get_wait_data(cosa) != 'g') return -2;
1487 if (get_wait_data(cosa) != '=') return -3;
1488
1489 if (puthexnumber(cosa, address) < 0) return -4;
1490 if (put_wait_data(cosa, '\r') == -1) return -5;
1491
1492 if (get_wait_data(cosa) != '\r') return -6;
1493 if (get_wait_data(cosa) != '\r') return -7;
1494 if (get_wait_data(cosa) != '\n') return -8;
1495 if (get_wait_data(cosa) != '\r') return -9;
1496 if (get_wait_data(cosa) != '\n') return -10;
1497#if 0
1498 printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
1499#endif
1500 return 0;
1501}
1502
1503/*
1504 * Reading memory is done via the "r" command of the SRP monitor.
1505 * The chat is the following "r" "r=" "<addr> " " =" "<last_byte> " " "
1506 * Then the driver can read the data, and the conversation is finished
1507 * by SRP monitor sending "<CR><LF>." (dot at the end).
1508 *
1509 * This routine is not needed during the normal operation and serves
1510 * for debugging purposes only.
1511 */
1512static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
1513{
1514 if (put_wait_data(cosa, 'r') == -1) return -1;
1515 if ((get_wait_data(cosa)) != 'r') return -2;
1516 if ((get_wait_data(cosa)) != '=') return -3;
1517
1518 if (puthexnumber(cosa, address) < 0) return -4;
1519 if (put_wait_data(cosa, ' ') == -1) return -5;
1520 if (get_wait_data(cosa) != ' ') return -6;
1521 if (get_wait_data(cosa) != '=') return -7;
1522
1523 if (puthexnumber(cosa, address+length-1) < 0) return -8;
1524 if (put_wait_data(cosa, ' ') == -1) return -9;
1525 if (get_wait_data(cosa) != ' ') return -10;
1526
1527 while (length--) {
1528 char c;
1529 int i;
1530 if ((i=get_wait_data(cosa)) == -1) {
1531 printk (KERN_INFO "cosa: 0x%04x bytes remaining\n",
1532 length);
1533 return -11;
1534 }
1535 c=i;
1536#if 1
1537 if (put_user(c, microcode))
1538 return -23; /* ??? */
1539#else
1540 *microcode = c;
1541#endif
1542 microcode++;
1543 }
1544
1545 if (get_wait_data(cosa) != '\r') return -21;
1546 if (get_wait_data(cosa) != '\n') return -22;
1547 if (get_wait_data(cosa) != '.') return -23;
1548#if 0
1549 printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
1550#endif
1551 return 0;
1552}
1553
1554/*
1555 * This function resets the device and reads the initial prompt
1556 * of the device's ROM monitor.
1557 */
1558static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
1559{
1560 int i=0, id=0, prev=0, curr=0;
1561
1562 /* Reset the card ... */
1563 cosa_putstatus(cosa, 0);
1564 cosa_getdata8(cosa);
1565 cosa_putstatus(cosa, SR_RST);
1566#ifdef MODULE
1567 msleep(500);
1568#else
1569 udelay(5*100000);
1570#endif
1571 /* Disable all IRQs from the card */
1572 cosa_putstatus(cosa, 0);
1573
1574 /*
1575 * Try to read the ID string. The card then prints out the
1576 * identification string ended by the "\n\x2e".
1577 *
1578 * The following loop is indexed through i (instead of id)
1579 * to avoid looping forever when for any reason
1580 * the port returns '\r', '\n' or '\x2e' permanently.
1581 */
1582 for (i=0; i<COSA_MAX_ID_STRING-1; i++, prev=curr) {
1583 if ((curr = get_wait_data(cosa)) == -1) {
1584 return -1;
1585 }
1586 curr &= 0xff;
1587 if (curr != '\r' && curr != '\n' && curr != 0x2e)
1588 idstring[id++] = curr;
1589 if (curr == 0x2e && prev == '\n')
1590 break;
1591 }
1592 /* Perhaps we should fail when i==COSA_MAX_ID_STRING-1 ? */
1593 idstring[id] = '\0';
1594 return id;
1595}
1596
1597
1598/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
1599
1600/*
1601 * This routine gets a data byte from the card, waiting in a loop for the
1602 * SR_RX_RDY bit to be set. It should be used only in exceptional cases
1603 * (for example when resetting the card or downloading the firmware).
1604 */
1605static int get_wait_data(struct cosa_data *cosa)
1606{
1607 int retries = 1000;
1608
1609 while (--retries) {
1610 /* read data and return them */
1611 if (cosa_getstatus(cosa) & SR_RX_RDY) {
1612 short r;
1613 r = cosa_getdata8(cosa);
1614#if 0
1615 printk(KERN_INFO "cosa: get_wait_data returning after %d retries\n", 999-retries);
1616#endif
1617 return r;
1618 }
1619 /* sleep if not ready to read */
1620 set_current_state(TASK_INTERRUPTIBLE);
1621 schedule_timeout(1);
1622 }
1623 printk(KERN_INFO "cosa: timeout in get_wait_data (status 0x%x)\n",
1624 cosa_getstatus(cosa));
1625 return -1;
1626}
1627
1628/*
1629 * This routine puts a data byte to the card, waiting in a loop for the
1630 * SR_TX_RDY bit to be set. It should be used only in exceptional cases
1631 * (for example when resetting the card or downloading the firmware).
1632 */
1633static int put_wait_data(struct cosa_data *cosa, int data)
1634{
1635 int retries = 1000;
1636 while (--retries) {
1637		/* write the data if the transmitter is ready */
1638 if (cosa_getstatus(cosa) & SR_TX_RDY) {
1639 cosa_putdata8(cosa, data);
1640#if 0
1641 printk(KERN_INFO "Putdata: %d retries\n", 999-retries);
1642#endif
1643 return 0;
1644 }
1645#if 0
1646 /* sleep if not ready to read */
1647 current->state = TASK_INTERRUPTIBLE;
1648 schedule_timeout(1);
1649#endif
1650 }
1651 printk(KERN_INFO "cosa%d: timeout in put_wait_data (status 0x%x)\n",
1652 cosa->num, cosa_getstatus(cosa));
1653 return -1;
1654}
1655
1656/*
1657 * The following routine puts the hexadecimal number into the SRP monitor
1658 * and verifies the proper echo of the sent bytes. Returns 0 on success
1659 * or a negative number on failure: -1,-3,-5,-7 mean that put_wait_data() failed,
1660 * -2,-4,-6,-8 that reading the echo failed.
1661 */
1662static int puthexnumber(struct cosa_data *cosa, int number)
1663{
1664 char temp[5];
1665 int i;
1666
1667 /* Well, I should probably replace this by something faster. */
1668 sprintf(temp, "%04X", number);
1669 for (i=0; i<4; i++) {
1670 if (put_wait_data(cosa, temp[i]) == -1) {
1671 printk(KERN_NOTICE "cosa%d: puthexnumber failed to write byte %d\n",
1672 cosa->num, i);
1673 return -1-2*i;
1674 }
1675 if (get_wait_data(cosa) != temp[i]) {
1676			printk(KERN_NOTICE "cosa%d: puthexnumber failed to read echo of byte %d\n",
1677 cosa->num, i);
1678 return -2-2*i;
1679 }
1680 }
1681 return 0;
1682}
1683
1684
1685/* ---------- Interrupt routines ---------- */
1686
1687/*
1688 * There are three types of interrupt:
1689 * At the beginning of transmit - this is handled in tx_interrupt(),
1690 * at the beginning of receive - it is in rx_interrupt() and
1691 * at the end of transmit/receive - it is the eot_interrupt() function.
1692 * These functions are multiplexed by cosa_interrupt() according to the
1693 * COSA status byte. I have moved the rx/tx/eot interrupt handling into
1694 * separate functions to make it more readable. These functions are inline,
1695 * so there should be no overhead of function call.
1696 *
1697 * In the COSA bus-master mode, we need to tell the card the address of a
1698 * buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
1699 * It's time to use the bottom half :-(
1700 */
1701
1702/*
1703 * Transmit interrupt routine - called when COSA is willing to obtain
1704 * data from the OS. The trickiest part of the routine is the selection
1705 * of the channel we (the OS) want to send a packet for. For SRP we should
1706 * probably use a round-robin approach. The newer COSA firmwares have a
1707 * simple flow control - bits 2 and 3 set to 1 in the status word mean that
1708 * channel 0 or 1, respectively, doesn't want to receive data.
1709 *
1710 * It seems there is a bug in COSA firmware (need to trace it further):
1711 * When the driver status says that the kernel has no more data for transmit
1712 * (e.g. at the end of TX DMA) and then the kernel changes its mind
1713 * (e.g. new packet is queued to hard_start_xmit()), the card issues
1714 * the TX interrupt but does not mark the channel as ready-to-transmit.
1715 * The fix seems to be to push the packet to COSA despite its request.
1716 * We first try to obey the card's opinion, and then fall back to forced TX.
1717 */
1718static inline void tx_interrupt(struct cosa_data *cosa, int status)
1719{
1720 unsigned long flags, flags1;
1721#ifdef DEBUG_IRQS
1722 printk(KERN_INFO "cosa%d: SR_DOWN_REQUEST status=0x%04x\n",
1723 cosa->num, status);
1724#endif
1725 spin_lock_irqsave(&cosa->lock, flags);
1726 set_bit(TXBIT, &cosa->rxtx);
1727 if (!test_bit(IRQBIT, &cosa->rxtx)) {
1728 /* flow control, see the comment above */
1729 int i=0;
1730 if (!cosa->txbitmap) {
1731 printk(KERN_WARNING "%s: No channel wants data "
1732				"in TX IRQ. Expect DMA timeout.\n",
1733 cosa->name);
1734 put_driver_status_nolock(cosa);
1735 clear_bit(TXBIT, &cosa->rxtx);
1736 spin_unlock_irqrestore(&cosa->lock, flags);
1737 return;
1738 }
1739 while(1) {
1740 cosa->txchan++;
1741 i++;
1742 if (cosa->txchan >= cosa->nchannels)
1743 cosa->txchan = 0;
1744 if (!(cosa->txbitmap & (1<<cosa->txchan)))
1745 continue;
1746 if (~status & (1 << (cosa->txchan+DRIVER_TXMAP_SHIFT)))
1747 break;
1748			/* on the second pass, take the first channel with queued data even if it is not ready */
1749 if (i > cosa->nchannels) {
1750 /* Can be safely ignored */
1751#ifdef DEBUG_IRQS
1752 printk(KERN_DEBUG "%s: Forcing TX "
1753 "to not-ready channel %d\n",
1754 cosa->name, cosa->txchan);
1755#endif
1756 break;
1757 }
1758 }
1759
1760 cosa->txsize = cosa->chan[cosa->txchan].txsize;
1761 if (cosa_dma_able(cosa->chan+cosa->txchan,
1762 cosa->chan[cosa->txchan].txbuf, cosa->txsize)) {
1763 cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
1764 } else {
1765 memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
1766 cosa->txsize);
1767 cosa->txbuf = cosa->bouncebuf;
1768 }
1769 }
1770
1771 if (is_8bit(cosa)) {
1772 if (!test_bit(IRQBIT, &cosa->rxtx)) {
1773 cosa_putstatus(cosa, SR_TX_INT_ENA);
1774 cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0)|
1775 ((cosa->txsize >> 8) & 0x1f));
1776#ifdef DEBUG_IO
1777 debug_status_out(cosa, SR_TX_INT_ENA);
1778 debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0)|
1779 ((cosa->txsize >> 8) & 0x1f));
1780 debug_data_in(cosa, cosa_getdata8(cosa));
1781#else
1782 cosa_getdata8(cosa);
1783#endif
1784 set_bit(IRQBIT, &cosa->rxtx);
1785 spin_unlock_irqrestore(&cosa->lock, flags);
1786 return;
1787 } else {
1788 clear_bit(IRQBIT, &cosa->rxtx);
1789 cosa_putstatus(cosa, 0);
1790 cosa_putdata8(cosa, cosa->txsize&0xff);
1791#ifdef DEBUG_IO
1792 debug_status_out(cosa, 0);
1793 debug_data_out(cosa, cosa->txsize&0xff);
1794#endif
1795 }
1796 } else {
1797 cosa_putstatus(cosa, SR_TX_INT_ENA);
1798 cosa_putdata16(cosa, ((cosa->txchan<<13) & 0xe000)
1799 | (cosa->txsize & 0x1fff));
1800#ifdef DEBUG_IO
1801 debug_status_out(cosa, SR_TX_INT_ENA);
1802 debug_data_out(cosa, ((cosa->txchan<<13) & 0xe000)
1803 | (cosa->txsize & 0x1fff));
1804 debug_data_in(cosa, cosa_getdata8(cosa));
1805 debug_status_out(cosa, 0);
1806#else
1807 cosa_getdata8(cosa);
1808#endif
1809 cosa_putstatus(cosa, 0);
1810 }
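	/*
	 * Editorial example of the header written above: for txchan = 1 and
	 * txsize = 0x234, the 16-bit card receives the single word 0x2234,
	 * while the 8-bit card receives 0x22 in the first interrupt pass
	 * and 0x34 in the second.
	 */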
1811
1812 if (cosa->busmaster) {
1813 unsigned long addr = virt_to_bus(cosa->txbuf);
1814 int count=0;
1815 printk(KERN_INFO "busmaster IRQ\n");
1816 while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
1817 count++;
1818 udelay(10);
1819 if (count > 1000) break;
1820 }
1821 printk(KERN_INFO "status %x\n", cosa_getstatus(cosa));
1822 printk(KERN_INFO "ready after %d loops\n", count);
1823 cosa_putdata16(cosa, (addr >> 16)&0xffff);
1824
1825 count = 0;
1826 while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
1827 count++;
1828 if (count > 1000) break;
1829 udelay(10);
1830 }
1831 printk(KERN_INFO "ready after %d loops\n", count);
1832 cosa_putdata16(cosa, addr &0xffff);
1833 flags1 = claim_dma_lock();
1834 set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
1835 enable_dma(cosa->dma);
1836 release_dma_lock(flags1);
1837 } else {
1838 /* start the DMA */
1839 flags1 = claim_dma_lock();
1840 disable_dma(cosa->dma);
1841 clear_dma_ff(cosa->dma);
1842 set_dma_mode(cosa->dma, DMA_MODE_WRITE);
1843 set_dma_addr(cosa->dma, virt_to_bus(cosa->txbuf));
1844 set_dma_count(cosa->dma, cosa->txsize);
1845 enable_dma(cosa->dma);
1846 release_dma_lock(flags1);
1847 }
1848 cosa_putstatus(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
1849#ifdef DEBUG_IO
1850 debug_status_out(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
1851#endif
1852 spin_unlock_irqrestore(&cosa->lock, flags);
1853}
1854
1855static inline void rx_interrupt(struct cosa_data *cosa, int status)
1856{
1857 unsigned long flags;
1858#ifdef DEBUG_IRQS
1859 printk(KERN_INFO "cosa%d: SR_UP_REQUEST\n", cosa->num);
1860#endif
1861
1862 spin_lock_irqsave(&cosa->lock, flags);
1863 set_bit(RXBIT, &cosa->rxtx);
1864
1865 if (is_8bit(cosa)) {
1866 if (!test_bit(IRQBIT, &cosa->rxtx)) {
1867 set_bit(IRQBIT, &cosa->rxtx);
1868 put_driver_status_nolock(cosa);
1869 cosa->rxsize = cosa_getdata8(cosa) <<8;
1870#ifdef DEBUG_IO
1871 debug_data_in(cosa, cosa->rxsize >> 8);
1872#endif
1873 spin_unlock_irqrestore(&cosa->lock, flags);
1874 return;
1875 } else {
1876 clear_bit(IRQBIT, &cosa->rxtx);
1877 cosa->rxsize |= cosa_getdata8(cosa) & 0xff;
1878#ifdef DEBUG_IO
1879 debug_data_in(cosa, cosa->rxsize & 0xff);
1880#endif
1881#if 0
1882 printk(KERN_INFO "cosa%d: receive rxsize = (0x%04x).\n",
1883 cosa->num, cosa->rxsize);
1884#endif
1885 }
1886 } else {
1887 cosa->rxsize = cosa_getdata16(cosa);
1888#ifdef DEBUG_IO
1889 debug_data_in(cosa, cosa->rxsize);
1890#endif
1891#if 0
1892 printk(KERN_INFO "cosa%d: receive rxsize = (0x%04x).\n",
1893 cosa->num, cosa->rxsize);
1894#endif
1895 }
1896 if (((cosa->rxsize & 0xe000) >> 13) >= cosa->nchannels) {
1897 printk(KERN_WARNING "%s: rx for unknown channel (0x%04x)\n",
1898 cosa->name, cosa->rxsize);
1899 spin_unlock_irqrestore(&cosa->lock, flags);
1900 goto reject;
1901 }
1902 cosa->rxchan = cosa->chan + ((cosa->rxsize & 0xe000) >> 13);
1903 cosa->rxsize &= 0x1fff;
1904 spin_unlock_irqrestore(&cosa->lock, flags);
1905
1906 cosa->rxbuf = NULL;
1907 if (cosa->rxchan->setup_rx)
1908 cosa->rxbuf = cosa->rxchan->setup_rx(cosa->rxchan, cosa->rxsize);
1909
1910 if (!cosa->rxbuf) {
1911reject: /* Reject the packet */
1912 printk(KERN_INFO "cosa%d: rejecting packet on channel %d\n",
1913 cosa->num, cosa->rxchan->num);
1914 cosa->rxbuf = cosa->bouncebuf;
1915 }
1916
1917 /* start the DMA */
1918 flags = claim_dma_lock();
1919 disable_dma(cosa->dma);
1920 clear_dma_ff(cosa->dma);
1921 set_dma_mode(cosa->dma, DMA_MODE_READ);
1922 if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff)) {
1923 set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
1924 } else {
1925 set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
1926 }
1927 set_dma_count(cosa->dma, (cosa->rxsize&0x1fff));
1928 enable_dma(cosa->dma);
1929 release_dma_lock(flags);
1930 spin_lock_irqsave(&cosa->lock, flags);
1931 cosa_putstatus(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
1932 if (!is_8bit(cosa) && (status & SR_TX_RDY))
1933 cosa_putdata8(cosa, DRIVER_RX_READY);
1934#ifdef DEBUG_IO
1935 debug_status_out(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
1936 if (!is_8bit(cosa) && (status & SR_TX_RDY))
1937 debug_data_cmd(cosa, DRIVER_RX_READY);
1938#endif
1939 spin_unlock_irqrestore(&cosa->lock, flags);
1940}
1941
1942static inline void eot_interrupt(struct cosa_data *cosa, int status)
1943{
1944 unsigned long flags, flags1;
1945 spin_lock_irqsave(&cosa->lock, flags);
1946 flags1 = claim_dma_lock();
1947 disable_dma(cosa->dma);
1948 clear_dma_ff(cosa->dma);
1949 release_dma_lock(flags1);
1950 if (test_bit(TXBIT, &cosa->rxtx)) {
1951 struct channel_data *chan = cosa->chan+cosa->txchan;
1952 if (chan->tx_done)
1953 if (chan->tx_done(chan, cosa->txsize))
1954 clear_bit(chan->num, &cosa->txbitmap);
1955 } else if (test_bit(RXBIT, &cosa->rxtx)) {
1956#ifdef DEBUG_DATA
1957 {
1958 int i;
1959 printk(KERN_INFO "cosa%dc%d: done rx(0x%x)", cosa->num,
1960 cosa->rxchan->num, cosa->rxsize);
1961 for (i=0; i<cosa->rxsize; i++)
1962 printk (" %02x", cosa->rxbuf[i]&0xff);
1963 printk("\n");
1964 }
1965#endif
1966 /* Packet for unknown channel? */
1967 if (cosa->rxbuf == cosa->bouncebuf)
1968 goto out;
1969 if (!cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize))
1970 memcpy(cosa->rxbuf, cosa->bouncebuf, cosa->rxsize);
1971 if (cosa->rxchan->rx_done)
1972 if (cosa->rxchan->rx_done(cosa->rxchan))
1973 clear_bit(cosa->rxchan->num, &cosa->rxbitmap);
1974 } else {
1975 printk(KERN_NOTICE "cosa%d: unexpected EOT interrupt\n",
1976 cosa->num);
1977 }
1978 /*
1979	 * Clear the RXBIT, TXBIT and IRQBIT (the last one should be
1980	 * cleared anyway). We should do it as soon as possible
1981	 * so that we can tell the COSA we are done and give it time
1982	 * for recovery.
1983 */
1984out:
1985 cosa->rxtx = 0;
1986 put_driver_status_nolock(cosa);
1987 spin_unlock_irqrestore(&cosa->lock, flags);
1988}
1989
1990static irqreturn_t cosa_interrupt(int irq, void *cosa_, struct pt_regs *regs)
1991{
1992 unsigned status;
1993 int count = 0;
1994 struct cosa_data *cosa = cosa_;
1995again:
1996 status = cosa_getstatus(cosa);
1997#ifdef DEBUG_IRQS
1998 printk(KERN_INFO "cosa%d: got IRQ, status 0x%02x\n", cosa->num,
1999 status & 0xff);
2000#endif
2001#ifdef DEBUG_IO
2002 debug_status_in(cosa, status);
2003#endif
2004 switch (status & SR_CMD_FROM_SRP_MASK) {
2005 case SR_DOWN_REQUEST:
2006 tx_interrupt(cosa, status);
2007 break;
2008 case SR_UP_REQUEST:
2009 rx_interrupt(cosa, status);
2010 break;
2011 case SR_END_OF_TRANSFER:
2012 eot_interrupt(cosa, status);
2013 break;
2014 default:
2015 /* We may be too fast for SRP. Try to wait a bit more. */
2016 if (count++ < 100) {
2017 udelay(100);
2018 goto again;
2019 }
2020 printk(KERN_INFO "cosa%d: unknown status 0x%02x in IRQ after %d retries\n",
2021 cosa->num, status & 0xff, count);
2022 }
2023#ifdef DEBUG_IRQS
2024 if (count)
2025 printk(KERN_INFO "%s: %d-times got unknown status in IRQ\n",
2026 cosa->name, count);
2027 else
2028 printk(KERN_INFO "%s: returning from IRQ\n", cosa->name);
2029#endif
2030 return IRQ_HANDLED;
2031}
2032
2033
2034/* ---------- I/O debugging routines ---------- */
2035/*
2036 * These routines can be used to monitor COSA/SRP I/O and to printk()
2037 * the data being transferred on the data and status I/O port in a
2038 * readable way.
2039 */
2040
2041#ifdef DEBUG_IO
2042static void debug_status_in(struct cosa_data *cosa, int status)
2043{
2044 char *s;
2045 switch(status & SR_CMD_FROM_SRP_MASK) {
2046 case SR_UP_REQUEST:
2047 s = "RX_REQ";
2048 break;
2049 case SR_DOWN_REQUEST:
2050 s = "TX_REQ";
2051 break;
2052 case SR_END_OF_TRANSFER:
2053 s = "ET_REQ";
2054 break;
2055 default:
2056 s = "NO_REQ";
2057 break;
2058 }
2059 printk(KERN_INFO "%s: IO: status -> 0x%02x (%s%s%s%s)\n",
2060 cosa->name,
2061 status,
2062 status & SR_USR_RQ ? "USR_RQ|":"",
2063 status & SR_TX_RDY ? "TX_RDY|":"",
2064 status & SR_RX_RDY ? "RX_RDY|":"",
2065 s);
2066}
2067
2068static void debug_status_out(struct cosa_data *cosa, int status)
2069{
2070 printk(KERN_INFO "%s: IO: status <- 0x%02x (%s%s%s%s%s%s)\n",
2071 cosa->name,
2072 status,
2073 status & SR_RX_DMA_ENA ? "RXDMA|":"!rxdma|",
2074 status & SR_TX_DMA_ENA ? "TXDMA|":"!txdma|",
2075 status & SR_RST ? "RESET|":"",
2076 status & SR_USR_INT_ENA ? "USRINT|":"!usrint|",
2077 status & SR_TX_INT_ENA ? "TXINT|":"!txint|",
2078 status & SR_RX_INT_ENA ? "RXINT":"!rxint");
2079}
2080
2081static void debug_data_in(struct cosa_data *cosa, int data)
2082{
2083 printk(KERN_INFO "%s: IO: data -> 0x%04x\n", cosa->name, data);
2084}
2085
2086static void debug_data_out(struct cosa_data *cosa, int data)
2087{
2088 printk(KERN_INFO "%s: IO: data <- 0x%04x\n", cosa->name, data);
2089}
2090
2091static void debug_data_cmd(struct cosa_data *cosa, int data)
2092{
2093 printk(KERN_INFO "%s: IO: data <- 0x%04x (%s|%s)\n",
2094 cosa->name, data,
2095 data & SR_RDY_RCV ? "RX_RDY" : "!rx_rdy",
2096 data & SR_RDY_SND ? "TX_RDY" : "!tx_rdy");
2097}
2098#endif
2099
2100/* EOF -- this file has not been truncated */
diff --git a/drivers/net/wan/cosa.h b/drivers/net/wan/cosa.h
new file mode 100644
index 000000000000..028f3d96b971
--- /dev/null
+++ b/drivers/net/wan/cosa.h
@@ -0,0 +1,117 @@
1/* $Id: cosa.h,v 1.6 1999/01/06 14:02:44 kas Exp $ */
2
3/*
4 * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#ifndef COSA_H__
22#define COSA_H__
23
24#include <linux/ioctl.h>
25
26#ifdef __KERNEL__
27/* status register - output bits */
28#define SR_RX_DMA_ENA 0x04 /* receiver DMA enable bit */
29#define SR_TX_DMA_ENA 0x08 /* transmitter DMA enable bit */
30#define SR_RST 0x10 /* SRP reset */
31#define SR_USR_INT_ENA 0x20 /* user interrupt enable bit */
32#define SR_TX_INT_ENA 0x40 /* transmitter interrupt enable bit */
33#define SR_RX_INT_ENA 0x80 /* receiver interrupt enable bit */
34
35/* status register - input bits */
36#define SR_USR_RQ 0x20 /* user interrupt request pending */
37#define SR_TX_RDY 0x40 /* transmitter empty (ready) */
38#define SR_RX_RDY 0x80 /* receiver data ready */
39
40#define SR_UP_REQUEST 0x02 /* request from SRP to transfer data
41 up to PC */
42#define SR_DOWN_REQUEST 0x01 /* SRP is able to transfer data down
43 from PC to SRP */
44#define SR_END_OF_TRANSFER	0x03	/* SRP signals the end of
45 transfer (up or down) */
46
47#define SR_CMD_FROM_SRP_MASK 0x03 /* mask to get SRP command */
48
49/* bits in driver status byte definitions : */
50#define SR_RDY_RCV 0x01 /* ready to receive packet */
51#define SR_RDY_SND 0x02 /* ready to send packet */
52#define SR_CMD_PND 0x04 /* command pending */ /* not currently used */
53
54/* ???? */
55#define SR_PKT_UP 0x01 /* transfer of packet up in progress */
56#define SR_PKT_DOWN 0x02 /* transfer of packet down in progress */
57
58#endif /* __KERNEL__ */
59
60#define SR_LOAD_ADDR 0x4400 /* SRP microcode load address */
61#define SR_START_ADDR 0x4400 /* SRP microcode start address */
62
63#define COSA_LOAD_ADDR 0x400 /* SRP microcode load address */
64#define COSA_MAX_FIRMWARE_SIZE 0x10000
65
66/* ioctls */
67struct cosa_download {
68 int addr, len;
69 char __user *code;
70};
71
72/* Reset the device */
73#define COSAIORSET _IO('C',0xf0)
74
75/* Start microcode at given address */
76#define COSAIOSTRT _IOW('C',0xf1, int)
77
78/* Read the block from the device memory */
79#define COSAIORMEM _IOWR('C',0xf2, struct cosa_download *)
80 /* actually the struct cosa_download itself; this is to keep
81 * the ioctl number same as in 2.4 in order to keep the user-space
82 * utils compatible. */
83
84/* Write the block to the device memory (i.e. download the microcode) */
85#define COSAIODOWNLD _IOW('C',0xf2, struct cosa_download *)
86 /* actually the struct cosa_download itself; this is to keep
87 * the ioctl number same as in 2.4 in order to keep the user-space
88 * utils compatible. */
89
90/* Read the device type (one of "srp", "cosa8" and "cosa16" for now) */
91#define COSAIORTYPE _IOR('C',0xf3, char *)
92
93/* Read the device identification string */
94#define COSAIORIDSTR _IOR('C',0xf4, char *)
95/* Maximum length of the identification string. */
96#define COSA_MAX_ID_STRING 128
97
98/* Increment/decrement the module usage count :-) */
99/* #define COSAIOMINC _IO('C',0xf5) */
100/* #define COSAIOMDEC _IO('C',0xf6) */
101
102/* Get the total number of cards installed */
103#define COSAIONRCARDS _IO('C',0xf7)
104
105/* Get the number of channels on this card */
106#define COSAIONRCHANS _IO('C',0xf8)
107
108/* Set the driver for the bus-master operations */
109#define COSAIOBMSET _IOW('C', 0xf9, unsigned short)
110
111#define COSA_BM_OFF 0 /* Bus-mastering off - use ISA DMA (default) */
112#define COSA_BM_ON 1 /* Bus-mastering on - faster but untested */
113
114/* Gets the busmaster status */
115#define COSAIOBMGET _IO('C', 0xfa)
116
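/*
 * Editorial sketch of how user space drives these ioctls (an addition,
 * not part of the original header): after opening the character device,
 * the usual bring-up sequence is reset, download, start.  The device
 * path and the load/start address (COSA_LOAD_ADDR here) are assumptions
 * for the example; error handling is omitted.
 *
 *	int fd = open("/dev/cosa/0", O_RDWR);
 *	struct cosa_download d = { COSA_LOAD_ADDR, fw_len, fw_code };
 *
 *	ioctl(fd, COSAIORSET);			(reset the card)
 *	ioctl(fd, COSAIODOWNLD, &d);		(download the microcode)
 *	ioctl(fd, COSAIOSTRT, COSA_LOAD_ADDR);	(start it at the load address)
 */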
117#endif /* !COSA_H__ */
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
new file mode 100644
index 000000000000..6e74af62ca08
--- /dev/null
+++ b/drivers/net/wan/cycx_drv.c
@@ -0,0 +1,586 @@
1/*
2* cycx_drv.c Cyclom 2X Support Module.
3*
4* This module is a library of common hardware specific
5* functions used by the Cyclades Cyclom 2X sync card.
6*
7* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8*
9* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
10*
11* Based on sdladrv.c by Gene Kozin <genek@compuserve.com>
12*
13* This program is free software; you can redistribute it and/or
14* modify it under the terms of the GNU General Public License
15* as published by the Free Software Foundation; either version
16* 2 of the License, or (at your option) any later version.
17* ============================================================================
18* 1999/11/11 acme set_current_state(TASK_INTERRUPTIBLE), code
19* cleanup
20* 1999/11/08 acme init_cyc2x deleted, doing nothing
21* 1999/11/06 acme back to read[bw], write[bw] and memcpy_to and
22* fromio to use dpmbase ioremaped
23* 1999/10/26 acme use isa_read[bw], isa_write[bw] & isa_memcpy_to
24* & fromio
25* 1999/10/23 acme cleanup to only supports cyclom2x: all the other
26* boards are no longer manufactured by cyclades,
27* if someone wants to support them... be my guest!
28* 1999/05/28 acme cycx_intack & cycx_intde gone for good
29* 1999/05/18 acme lots of unlogged work, submitting to Linus...
30* 1999/01/03 acme more judicious use of data types
31* 1999/01/03 acme judicious use of data types :>
32* cycx_inten trying to reset pending interrupts
33* from cyclom 2x - I think this isn't the way to
34* go, but for now...
35* 1999/01/02 acme cycx_intack ok, I think there's nothing to do
36* to ack an int in cycx_drv.c, only handle it in
37* cyx_isr (or in the other protocols: cyp_isr,
38* cyf_isr, when they get implemented.
39* Dec 31, 1998 acme cycx_data_boot & cycx_code_boot fixed, crossing
40* fingers to see x25_configure in cycx_x25.c
41* work... :)
42* Dec 26, 1998 acme load implementation fixed, seems to work! :)
43* cycx_2x_dpmbase_options with all the possible
44* DPM addresses (20).
45* cycx_intr implemented (test this!)
46* general code cleanup
47* Dec 8, 1998 Ivan Passos Cyclom-2X firmware load implementation.
48* Aug 8, 1998 acme Initial version.
49*/
50
51#include <linux/init.h> /* __init */
52#include <linux/module.h>
53#include <linux/kernel.h> /* printk(), and other useful stuff */
54#include <linux/stddef.h> /* offsetof(), etc. */
55#include <linux/errno.h> /* return codes */
56#include <linux/sched.h> /* for jiffies, HZ, etc. */
57#include <linux/cycx_drv.h> /* API definitions */
58#include <linux/cycx_cfm.h> /* CYCX firmware module definitions */
59#include <linux/delay.h> /* udelay */
60#include <asm/io.h> /* read[wl], write[wl], ioremap, iounmap */
61
62#define MOD_VERSION 0
63#define MOD_RELEASE 6
64
65MODULE_AUTHOR("Arnaldo Carvalho de Melo");
66MODULE_DESCRIPTION("Cyclom 2x Sync Card Driver");
67MODULE_LICENSE("GPL");
68
69/* Hardware-specific functions */
70static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len);
71static void cycx_bootcfg(struct cycx_hw *hw);
72
73static int reset_cyc2x(void __iomem *addr);
74static int detect_cyc2x(void __iomem *addr);
75
76/* Miscellaneous functions */
77static void delay_cycx(int sec);
78static int get_option_index(long *optlist, long optval);
79static u16 checksum(u8 *buf, u32 len);
80
81#define wait_cyc(addr) cycx_exec(addr + CMD_OFFSET)
82
83/* Global Data */
84
85/* private data */
86static char modname[] = "cycx_drv";
87static char fullname[] = "Cyclom 2X Support Module";
88static char copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
89 "<acme@conectiva.com.br>";
90
91/* Hardware configuration options.
92 * These are arrays of configuration options used by verification routines.
93 * The first element of each array is its size (i.e. number of options).
94 */
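/* Example: cyc2x_dpmbase_options[0] == 20, and entries 1..20 hold the valid
 * dual-port memory base addresses checked by get_option_index(). */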
95static long cyc2x_dpmbase_options[] = {
96 20,
97 0xA0000, 0xA4000, 0xA8000, 0xAC000, 0xB0000, 0xB4000, 0xB8000,
98 0xBC000, 0xC0000, 0xC4000, 0xC8000, 0xCC000, 0xD0000, 0xD4000,
99 0xD8000, 0xDC000, 0xE0000, 0xE4000, 0xE8000, 0xEC000
100};
101
102static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
103
104/* Kernel Loadable Module Entry Points */
105/* Module 'insert' entry point.
106 * o print announcement
107 * o initialize static data
108 *
109 * Return: 0 Ok
110 * < 0 error.
111 * Context: process */
112
113int __init cycx_drv_init(void)
114{
115 printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE,
116 copyright);
117
118 return 0;
119}
120
121/* Module 'remove' entry point.
122 * o release all remaining system resources */
123void cycx_drv_cleanup(void)
124{
125}
126
127/* Kernel APIs */
128/* Set up adapter.
129 * o detect adapter type
130 * o verify hardware configuration options
131 * o check for hardware conflicts
132 * o set up adapter shared memory
133 * o test adapter memory
134 * o load firmware
135 * Return: 0 ok.
136 * < 0 error */
137EXPORT_SYMBOL(cycx_setup);
138int cycx_setup(struct cycx_hw *hw, void *cfm, u32 len, unsigned long dpmbase)
139{
140 int err;
141
142 /* Verify IRQ configuration options */
143 if (!get_option_index(cycx_2x_irq_options, hw->irq)) {
144 printk(KERN_ERR "%s: IRQ %d is invalid!\n", modname, hw->irq);
145 return -EINVAL;
146 }
147
148 /* Setup adapter dual-port memory window and test memory */
149 if (!dpmbase) {
150 printk(KERN_ERR "%s: you must specify the dpm address!\n",
151 modname);
152 return -EINVAL;
153 } else if (!get_option_index(cyc2x_dpmbase_options, dpmbase)) {
154 printk(KERN_ERR "%s: memory address 0x%lX is invalid!\n",
155 modname, dpmbase);
156 return -EINVAL;
157 }
158
159 hw->dpmbase = ioremap(dpmbase, CYCX_WINDOWSIZE);
160 hw->dpmsize = CYCX_WINDOWSIZE;
161
162 if (!detect_cyc2x(hw->dpmbase)) {
163 printk(KERN_ERR "%s: adapter Cyclom 2X not found at "
164 "address 0x%lX!\n", modname, dpmbase);
165 return -EINVAL;
166 }
167
168 printk(KERN_INFO "%s: found Cyclom 2X card at address 0x%lX.\n",
169 modname, dpmbase);
170
171 /* Load firmware. If loader fails then shut down adapter */
172 err = load_cyc2x(hw, cfm, len);
173
174 if (err)
175 cycx_down(hw); /* shutdown adapter */
176
177 return err;
178}
179
180EXPORT_SYMBOL(cycx_down);
181int cycx_down(struct cycx_hw *hw)
182{
183 iounmap(hw->dpmbase);
184 return 0;
185}
186
187/* Enable interrupt generation. */
188EXPORT_SYMBOL(cycx_inten);
189void cycx_inten(struct cycx_hw *hw)
190{
191 writeb(0, hw->dpmbase);
192}
193
194/* Generate an interrupt to adapter's CPU. */
195EXPORT_SYMBOL(cycx_intr);
196void cycx_intr(struct cycx_hw *hw)
197{
198 writew(0, hw->dpmbase + GEN_CYCX_INTR);
199}
200
201/* Execute Adapter Command.
202 * o Busy-wait until the command word at addr is cleared by the board
203 *   (polled every 1 ms, for at most 50 ms). */
204EXPORT_SYMBOL(cycx_exec);
205int cycx_exec(void __iomem *addr)
206{
207 u16 i = 0;
208 /* wait till addr content is zeroed */
209
210 while (readw(addr)) {
211 udelay(1000);
212
213 if (++i > 50)
214 return -1;
215 }
216
217 return 0;
218}
219
220/* Read absolute adapter memory.
221 * Transfer data from adapter's memory to data buffer. */
222EXPORT_SYMBOL(cycx_peek);
223int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
224{
225 if (len == 1)
226 *(u8*)buf = readb(hw->dpmbase + addr);
227 else
228 memcpy_fromio(buf, hw->dpmbase + addr, len);
229
230 return 0;
231}
232
233/* Write Absolute Adapter Memory.
234 * Transfer data from data buffer to adapter's memory. */
235EXPORT_SYMBOL(cycx_poke);
236int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len)
237{
238 if (len == 1)
239 writeb(*(u8*)buf, hw->dpmbase + addr);
240 else
241 memcpy_toio(hw->dpmbase + addr, buf, len);
242
243 return 0;
244}
245
246/* Hardware-Specific Functions */
247
248/* Load Aux Routines */
249/* Check for the presence of the adapter's dual-port memory.
250   Return 1 if memory exists at addr and 0 if not. */
251static int memory_exists(void __iomem *addr)
252{
253 int tries = 0;
254
255 for (; tries < 3 ; tries++) {
256 writew(TEST_PATTERN, addr + 0x10);
257
258 if (readw(addr + 0x10) == TEST_PATTERN)
259 if (readw(addr + 0x10) == TEST_PATTERN)
260 return 1;
261
262 delay_cycx(1);
263 }
264
265 return 0;
266}
267
268/* Load reset code. */
269static void reset_load(void __iomem *addr, u8 *buffer, u32 cnt)
270{
271 void __iomem *pt_code = addr + RESET_OFFSET;
272 u16 i; /*, j; */
273
274 for (i = 0 ; i < cnt ; i++) {
275/* for (j = 0 ; j < 50 ; j++); Delay - FIXME busy waiting... */
276 writeb(*buffer++, pt_code++);
277 }
278}
279
280/* Load buffer using boot interface.
281 * o copy data from buffer to Cyclom-X memory
282 * o wait for reset code to copy it to right portion of memory */
283static int buffer_load(void __iomem *addr, u8 *buffer, u32 cnt)
284{
285 memcpy_toio(addr + DATA_OFFSET, buffer, cnt);
286 writew(GEN_BOOT_DAT, addr + CMD_OFFSET);
287
288 return wait_cyc(addr);
289}
290
291/* Set up entry point and kick start Cyclom-X CPU. */
292static void cycx_start(void __iomem *addr)
293{
294	/* write the jump instruction to the code entry point at offset 0x30 */
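	/* 0xea is the x86 far-jump (jmp ptr16:16) opcode; the four bytes that
	 * follow it below are the 16-bit offset and segment of the entry point. */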
295 writeb(0xea, addr + 0x30);
296 writeb(0x00, addr + 0x31);
297 writeb(0xc4, addr + 0x32);
298 writeb(0x00, addr + 0x33);
299 writeb(0x00, addr + 0x34);
300
301 /* cmd to start executing code */
302 writew(GEN_START, addr + CMD_OFFSET);
303}
304
305/* Load and boot reset code. */
306static void cycx_reset_boot(void __iomem *addr, u8 *code, u32 len)
307{
308 void __iomem *pt_start = addr + START_OFFSET;
309
310 writeb(0xea, pt_start++); /* jmp to f000:3f00 */
311 writeb(0x00, pt_start++);
312 writeb(0xfc, pt_start++);
313 writeb(0x00, pt_start++);
314 writeb(0xf0, pt_start);
315 reset_load(addr, code, len);
316
317 /* 80186 was in hold, go */
318 writeb(0, addr + START_CPU);
319 delay_cycx(1);
320}
321
322/* Load data.bin file through boot (reset) interface. */
323static int cycx_data_boot(void __iomem *addr, u8 *code, u32 len)
324{
325 void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
326 u32 i;
327
328	/* boot buffer length */
329 writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
330 writew(GEN_DEFPAR, pt_boot_cmd);
331
332 if (wait_cyc(addr) < 0)
333 return -1;
334
335 writew(0, pt_boot_cmd + sizeof(u16));
336 writew(0x4000, pt_boot_cmd + 2 * sizeof(u16));
337 writew(GEN_SET_SEG, pt_boot_cmd);
338
339 if (wait_cyc(addr) < 0)
340 return -1;
341
342 for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
343 if (buffer_load(addr, code + i,
344 min_t(u32, CFM_LOAD_BUFSZ, (len - i))) < 0) {
345			printk(KERN_ERR "%s: error loading data.bin buffer!\n", modname);
346 return -1;
347 }
348
349 return 0;
350}
351
352
353/* Load code.bin file through boot (reset) interface. */
354static int cycx_code_boot(void __iomem *addr, u8 *code, u32 len)
355{
356 void __iomem *pt_boot_cmd = addr + CMD_OFFSET;
357 u32 i;
358
359	/* boot buffer length */
360 writew(CFM_LOAD_BUFSZ, pt_boot_cmd + sizeof(u16));
361 writew(GEN_DEFPAR, pt_boot_cmd);
362
363 if (wait_cyc(addr) < 0)
364 return -1;
365
366 writew(0x0000, pt_boot_cmd + sizeof(u16));
367 writew(0xc400, pt_boot_cmd + 2 * sizeof(u16));
368 writew(GEN_SET_SEG, pt_boot_cmd);
369
370 if (wait_cyc(addr) < 0)
371 return -1;
372
373 for (i = 0 ; i < len ; i += CFM_LOAD_BUFSZ)
374 if (buffer_load(addr, code + i,
375 min_t(u32, CFM_LOAD_BUFSZ, (len - i)))) {
376			printk(KERN_ERR "%s: error loading code.bin buffer!\n", modname);
377 return -1;
378 }
379
380 return 0;
381}
382
383/* Load adapter from the memory image of the CYCX firmware module.
384 * o verify firmware integrity and compatibility
385 * o start adapter up */
386static int load_cyc2x(struct cycx_hw *hw, struct cycx_firmware *cfm, u32 len)
387{
388 int i, j;
389 struct cycx_fw_header *img_hdr;
390 u8 *reset_image,
391 *data_image,
392 *code_image;
393 void __iomem *pt_cycld = hw->dpmbase + 0x400;
394 u16 cksum;
395
396 /* Announce */
397 printk(KERN_INFO "%s: firmware signature=\"%s\"\n", modname,
398 cfm->signature);
399
400 /* Verify firmware signature */
401 if (strcmp(cfm->signature, CFM_SIGNATURE)) {
402 printk(KERN_ERR "%s:load_cyc2x: not Cyclom-2X firmware!\n",
403 modname);
404 return -EINVAL;
405 }
406
407 printk(KERN_INFO "%s: firmware version=%u\n", modname, cfm->version);
408
409 /* Verify firmware module format version */
410 if (cfm->version != CFM_VERSION) {
411 printk(KERN_ERR "%s:%s: firmware format %u rejected! "
412 "Expecting %u.\n",
413 modname, __FUNCTION__, cfm->version, CFM_VERSION);
414 return -EINVAL;
415 }
416
417 /* Verify firmware module length and checksum */
418 cksum = checksum((u8*)&cfm->info, sizeof(struct cycx_fw_info) +
419 cfm->info.codesize);
420/*
421 FIXME cfm->info.codesize is off by 2
422 if (((len - sizeof(struct cycx_firmware) - 1) != cfm->info.codesize) ||
423*/
424 if (cksum != cfm->checksum) {
425 printk(KERN_ERR "%s:%s: firmware corrupted!\n",
426 modname, __FUNCTION__);
427 printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n",
428 len - (int)sizeof(struct cycx_firmware) - 1,
429 cfm->info.codesize);
430 printk(KERN_ERR " chksum = 0x%x (expected 0x%x)\n",
431 cksum, cfm->checksum);
432 return -EINVAL;
433 }
434
435 /* If everything is ok, set reset, data and code pointers */
436 img_hdr = (struct cycx_fw_header *)&cfm->image;
437#ifdef FIRMWARE_DEBUG
438	printk(KERN_INFO "%s:%s: image sizes\n", modname, __FUNCTION__);
439 printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size);
440 printk(KERN_INFO " data=%lu\n", img_hdr->data_size);
441 printk(KERN_INFO " code=%lu\n", img_hdr->code_size);
442#endif
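	/* the firmware image is laid out as: header, reset.bin, data.bin, code.bin */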
443 reset_image = ((u8 *)img_hdr) + sizeof(struct cycx_fw_header);
444 data_image = reset_image + img_hdr->reset_size;
445 code_image = data_image + img_hdr->data_size;
446
447 /*---- Start load ----*/
448 /* Announce */
449 printk(KERN_INFO "%s: loading firmware %s (ID=%u)...\n", modname,
450 cfm->descr[0] ? cfm->descr : "unknown firmware",
451 cfm->info.codeid);
452
453 for (i = 0 ; i < 5 ; i++) {
454 /* Reset Cyclom hardware */
455 if (!reset_cyc2x(hw->dpmbase)) {
456 printk(KERN_ERR "%s: dpm problem or board not found\n",
457 modname);
458 return -EINVAL;
459 }
460
461 /* Load reset.bin */
462 cycx_reset_boot(hw->dpmbase, reset_image, img_hdr->reset_size);
463 /* reset is waiting for boot */
464 writew(GEN_POWER_ON, pt_cycld);
465 delay_cycx(1);
466
467 for (j = 0 ; j < 3 ; j++)
468 if (!readw(pt_cycld))
469 goto reset_loaded;
470 else
471 delay_cycx(1);
472 }
473
474 printk(KERN_ERR "%s: reset not started.\n", modname);
475 return -EINVAL;
476
477reset_loaded:
478 /* Load data.bin */
479 if (cycx_data_boot(hw->dpmbase, data_image, img_hdr->data_size)) {
480 printk(KERN_ERR "%s: cannot load data file.\n", modname);
481 return -EINVAL;
482 }
483
484 /* Load code.bin */
485 if (cycx_code_boot(hw->dpmbase, code_image, img_hdr->code_size)) {
486 printk(KERN_ERR "%s: cannot load code file.\n", modname);
487 return -EINVAL;
488 }
489
490 /* Prepare boot-time configuration data */
491 cycx_bootcfg(hw);
492
493 /* kick-off CPU */
494 cycx_start(hw->dpmbase);
495
496 /* Arthur Ganzert's tip: wait a while after the firmware loading...
497 seg abr 26 17:17:12 EST 1999 - acme */
498 delay_cycx(7);
499 printk(KERN_INFO "%s: firmware loaded!\n", modname);
500
501 /* enable interrupts */
502 cycx_inten(hw);
503
504 return 0;
505}
506
507/* Prepare boot-time firmware configuration data.
508 * o initialize configuration data area
509 From async.doc - V_3.4.0 - 07/18/1994
510 - As of now, only static buffers are available to the user.
511 So, the bit VD_RXDIRC must be set in 'valid'. That means that user
512 wants to use the static transmission and reception buffers. */
513static void cycx_bootcfg(struct cycx_hw *hw)
514{
515 /* use fixed buffers */
516 writeb(FIXED_BUFFERS, hw->dpmbase + CONF_OFFSET);
517}
518
519/* Detect Cyclom 2x adapter.
520 * The following test is used to detect the Cyclom 2x adapter:
521 * o reset the board and verify that its dual-port memory responds.
522 * Return 1 if detected o.k. or 0 if failed.
523 * Note: This test is destructive! Adapter will be left in shutdown
524 * state after the test. */
525static int detect_cyc2x(void __iomem *addr)
526{
527 reset_cyc2x(addr);
528
529 return memory_exists(addr);
530}
531
532/* Miscellaneous */
533/* Get option's index into the options list.
534 * Return option's index (1 .. N) or zero if option is invalid. */
535static int get_option_index(long *optlist, long optval)
536{
537 int i = 1;
538
539 for (; i <= optlist[0]; ++i)
540 if (optlist[i] == optval)
541 return i;
542
543 return 0;
544}
545
546/* Reset adapter's CPU. */
547static int reset_cyc2x(void __iomem *addr)
548{
549 writeb(0, addr + RST_ENABLE);
550 delay_cycx(2);
551 writeb(0, addr + RST_DISABLE);
552 delay_cycx(2);
553
554 return memory_exists(addr);
555}
556
557/* Sleep (interruptibly) for 'sec' seconds. */
558static void delay_cycx(int sec)
559{
560 set_current_state(TASK_INTERRUPTIBLE);
561 schedule_timeout(sec * HZ);
562}
563
564/* Calculate 16-bit CRC using CCITT polynomial. */
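/* (x^16 + x^12 + x^5 + 1, i.e. 0x1021, computed bit by bit, initial value 0) */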
565static u16 checksum(u8 *buf, u32 len)
566{
567 u16 crc = 0;
568 u16 mask, flag;
569
570 for (; len; --len, ++buf)
571 for (mask = 0x80; mask; mask >>= 1) {
572 flag = (crc & 0x8000);
573 crc <<= 1;
574 crc |= ((*buf & mask) ? 1 : 0);
575
576 if (flag)
577 crc ^= 0x1021;
578 }
579
580 return crc;
581}
582
583module_init(cycx_drv_init);
584module_exit(cycx_drv_cleanup);
585
586/* End */
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
new file mode 100644
index 000000000000..7b48064364dc
--- /dev/null
+++ b/drivers/net/wan/cycx_main.c
@@ -0,0 +1,351 @@
1/*
2* cycx_main.c Cyclades Cyclom 2X WAN Link Driver. Main module.
3*
4* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5*
6* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
7*
8* Based on sdlamain.c by Gene Kozin <genek@compuserve.com> &
9* Jaspreet Singh <jaspreet@sangoma.com>
10*
11* This program is free software; you can redistribute it and/or
12* modify it under the terms of the GNU General Public License
13* as published by the Free Software Foundation; either version
14* 2 of the License, or (at your option) any later version.
15* ============================================================================
16* Please look at the bitkeeper changelog (or at whatever scm tool ends up
17* importing the bitkeeper changelog or replaces bitkeeper in the future as
18* the main tool for Linux development).
19*
20* 2001/05/09 acme Fix MODULE_DESC for debug, .bss nitpicks,
21* some cleanups
22* 2000/07/13 acme remove useless #ifdef MODULE and crap
23* #if KERNEL_VERSION > blah
24* 2000/07/06 acme __exit at cyclomx_cleanup
25* 2000/04/02 acme dprintk and cycx_debug
26* module_init/module_exit
27* 2000/01/21 acme rename cyclomx_open to cyclomx_mod_inc_use_count
28* and cyclomx_close to cyclomx_mod_dec_use_count
29* 2000/01/08 acme cleanup
30* 1999/11/06 acme cycx_down back to life (it needs to be
31* called to iounmap the dpmbase)
32* 1999/08/09 acme removed references to enable_tx_int
33* use spinlocks instead of cli/sti in
34* cyclomx_set_state
35* 1999/05/19 acme works directly linked into the kernel
36* init_waitqueue_head for 2.3.* kernel
37* 1999/05/18 acme major cleanup (polling not needed), etc
38* 1998/08/28 acme minor cleanup (ioctls for firmware deleted)
39* queue_task activated
40* 1998/08/08 acme Initial version.
41*/
42
43#include <linux/config.h> /* OS configuration options */
44#include <linux/stddef.h> /* offsetof(), etc. */
45#include <linux/errno.h> /* return codes */
46#include <linux/string.h> /* inline memset(), etc. */
47#include <linux/slab.h> /* kmalloc(), kfree() */
48#include <linux/kernel.h> /* printk(), and other useful stuff */
49#include <linux/module.h> /* support for loadable modules */
50#include <linux/ioport.h> /* request_region(), release_region() */
51#include <linux/wanrouter.h> /* WAN router definitions */
52#include <linux/cyclomx.h> /* cyclomx common user API definitions */
53#include <linux/init.h> /* __init (when not using as a module) */
54
55unsigned int cycx_debug;
56
57MODULE_AUTHOR("Arnaldo Carvalho de Melo");
58MODULE_DESCRIPTION("Cyclom 2X Sync Card Driver.");
59MODULE_LICENSE("GPL");
60module_param(cycx_debug, int, 0);
61MODULE_PARM_DESC(cycx_debug, "cyclomx debug level");
62
63/* Defines & Macros */
64
65#define CYCX_DRV_VERSION 0 /* version number */
66#define CYCX_DRV_RELEASE 11 /* release (minor version) number */
67#define CYCX_MAX_CARDS 1 /* max number of adapters */
68
69#define CONFIG_CYCX_CARDS 1
70
71/* Function Prototypes */
72
73/* WAN link driver entry points */
74static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf);
75static int cycx_wan_shutdown(struct wan_device *wandev);
76
77/* Miscellaneous functions */
78static irqreturn_t cycx_isr(int irq, void *dev_id, struct pt_regs *regs);
79
80/* Global Data
81 * Note: All data must be explicitly initialized!!!
82 */
83
84/* private data */
85static char cycx_drvname[] = "cyclomx";
86static char cycx_fullname[] = "CYCLOM 2X(tm) Sync Card Driver";
87static char cycx_copyright[] = "(c) 1998-2003 Arnaldo Carvalho de Melo "
88 "<acme@conectiva.com.br>";
89static int cycx_ncards = CONFIG_CYCX_CARDS;
90static struct cycx_device *cycx_card_array; /* adapter data space */
91
92/* Kernel Loadable Module Entry Points */
93
94/*
95 * Module 'insert' entry point.
96 * o print announcement
97 * o allocate adapter data space
98 * o initialize static data
99 * o register all cards with WAN router
100 * o calibrate Cyclom 2X shared memory access delay.
101 *
102 * Return: 0 Ok
103 * < 0 error.
104 * Context: process
105 */
106int __init cycx_init(void)
107{
108 int cnt, err = -ENOMEM;
109
110 printk(KERN_INFO "%s v%u.%u %s\n",
111 cycx_fullname, CYCX_DRV_VERSION, CYCX_DRV_RELEASE,
112 cycx_copyright);
113
114 /* Verify number of cards and allocate adapter data space */
115 cycx_ncards = min_t(int, cycx_ncards, CYCX_MAX_CARDS);
116 cycx_ncards = max_t(int, cycx_ncards, 1);
117 cycx_card_array = kmalloc(sizeof(struct cycx_device) * cycx_ncards,
118 GFP_KERNEL);
119 if (!cycx_card_array)
120 goto out;
121
122 memset(cycx_card_array, 0, sizeof(struct cycx_device) * cycx_ncards);
123
124 /* Register adapters with WAN router */
125 for (cnt = 0; cnt < cycx_ncards; ++cnt) {
126 struct cycx_device *card = &cycx_card_array[cnt];
127 struct wan_device *wandev = &card->wandev;
128
129 sprintf(card->devname, "%s%d", cycx_drvname, cnt + 1);
130 wandev->magic = ROUTER_MAGIC;
131 wandev->name = card->devname;
132 wandev->private = card;
133 wandev->setup = cycx_wan_setup;
134 wandev->shutdown = cycx_wan_shutdown;
135 err = register_wan_device(wandev);
136
137 if (err) {
138 printk(KERN_ERR "%s: %s registration failed with "
139 "error %d!\n",
140 cycx_drvname, card->devname, err);
141 break;
142 }
143 }
144
145 err = -ENODEV;
146 if (!cnt) {
147 kfree(cycx_card_array);
148 goto out;
149 }
150 err = 0;
151 cycx_ncards = cnt; /* adjust actual number of cards */
152out: return err;
153}
154
155/*
156 * Module 'remove' entry point.
157 * o unregister all adapters from the WAN router
158 * o release all remaining system resources
159 */
160static void __exit cycx_exit(void)
161{
162 int i = 0;
163
164 for (; i < cycx_ncards; ++i) {
165 struct cycx_device *card = &cycx_card_array[i];
166 unregister_wan_device(card->devname);
167 }
168
169 kfree(cycx_card_array);
170}
171
172/* WAN Device Driver Entry Points */
173/*
174 * Setup/configure WAN link driver.
175 * o check adapter state
176 * o make sure firmware is present in configuration
177 * o allocate interrupt vector
178 * o setup Cyclom 2X hardware
179 * o call appropriate routine to perform protocol-specific initialization
180 *
181 * This function is called when router handles ROUTER_SETUP IOCTL. The
182 * configuration structure is in kernel memory (including extended data, if
183 * any).
184 */
185static int cycx_wan_setup(struct wan_device *wandev, wandev_conf_t *conf)
186{
187 int rc = -EFAULT;
188 struct cycx_device *card;
189 int irq;
190
191 /* Sanity checks */
192
193 if (!wandev || !wandev->private || !conf)
194 goto out;
195
196 card = wandev->private;
197 rc = -EBUSY;
198 if (wandev->state != WAN_UNCONFIGURED)
199 goto out;
200
201 rc = -EINVAL;
202 if (!conf->data_size || !conf->data) {
203 printk(KERN_ERR "%s: firmware not found in configuration "
204 "data!\n", wandev->name);
205 goto out;
206 }
207
208 if (conf->irq <= 0) {
209 printk(KERN_ERR "%s: can't configure without IRQ!\n",
210 wandev->name);
211 goto out;
212 }
213
214 /* Allocate IRQ */
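	/* on AT-style machines IRQ2 cascades to IRQ9, hence the remap below */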
215 irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
216
217 if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
218 printk(KERN_ERR "%s: can't reserve IRQ %d!\n",
219 wandev->name, irq);
220 goto out;
221 }
222
223 /* Configure hardware, load firmware, etc. */
224 memset(&card->hw, 0, sizeof(card->hw));
225 card->hw.irq = irq;
226 card->hw.dpmsize = CYCX_WINDOWSIZE;
227 card->hw.fwid = CFID_X25_2X;
228 spin_lock_init(&card->lock);
229 init_waitqueue_head(&card->wait_stats);
230
231 rc = cycx_setup(&card->hw, conf->data, conf->data_size, conf->maddr);
232 if (rc)
233 goto out_irq;
234
235 /* Initialize WAN device data space */
236 wandev->irq = irq;
237 wandev->dma = wandev->ioport = 0;
238 wandev->maddr = (unsigned long)card->hw.dpmbase;
239 wandev->msize = card->hw.dpmsize;
240 wandev->hw_opt[2] = 0;
241 wandev->hw_opt[3] = card->hw.fwid;
242
243 /* Protocol-specific initialization */
244 switch (card->hw.fwid) {
245#ifdef CONFIG_CYCLOMX_X25
246 case CFID_X25_2X:
247 rc = cycx_x25_wan_init(card, conf);
248 break;
249#endif
250 default:
251 printk(KERN_ERR "%s: this firmware is not supported!\n",
252 wandev->name);
253 rc = -EINVAL;
254 }
255
256 if (rc) {
257 cycx_down(&card->hw);
258 goto out_irq;
259 }
260
261 rc = 0;
262out:
263 return rc;
264out_irq:
265 free_irq(irq, card);
266 goto out;
267}
268
269/*
270 * Shut down WAN link driver.
271 * o shut down adapter hardware
272 * o release system resources.
273 *
274 * This function is called by the router when device is being unregistered or
275 * when it handles ROUTER_DOWN IOCTL.
276 */
277static int cycx_wan_shutdown(struct wan_device *wandev)
278{
279 int ret = -EFAULT;
280 struct cycx_device *card;
281
282 /* sanity checks */
283 if (!wandev || !wandev->private)
284 goto out;
285
286 ret = 0;
287 if (wandev->state == WAN_UNCONFIGURED)
288 goto out;
289
290 card = wandev->private;
291 wandev->state = WAN_UNCONFIGURED;
292 cycx_down(&card->hw);
293 printk(KERN_INFO "%s: irq %d being freed!\n", wandev->name,
294 wandev->irq);
295 free_irq(wandev->irq, card);
296out: return ret;
297}
298
299/* Miscellaneous */
300/*
301 * Cyclom 2X Interrupt Service Routine.
302 * o acknowledge Cyclom 2X hardware interrupt.
303 * o call protocol-specific interrupt service routine, if any.
304 */
305static irqreturn_t cycx_isr(int irq, void *dev_id, struct pt_regs *regs)
306{
307 struct cycx_device *card = (struct cycx_device *)dev_id;
308
309 if (!card || card->wandev.state == WAN_UNCONFIGURED)
310 goto out;
311
312 if (card->in_isr) {
313 printk(KERN_WARNING "%s: interrupt re-entrancy on IRQ %d!\n",
314 card->devname, card->wandev.irq);
315 goto out;
316 }
317
318 if (card->isr)
319 card->isr(card);
320 return IRQ_HANDLED;
321out:
322 return IRQ_NONE;
323}
324
325/* Set WAN device state. */
326void cycx_set_state(struct cycx_device *card, int state)
327{
328 unsigned long flags;
329 char *string_state = NULL;
330
331 spin_lock_irqsave(&card->lock, flags);
332
333 if (card->wandev.state != state) {
334 switch (state) {
335 case WAN_CONNECTED:
336 string_state = "connected!";
337 break;
338 case WAN_DISCONNECTED:
339 string_state = "disconnected!";
340 break;
341 }
342 printk(KERN_INFO "%s: link %s\n", card->devname, string_state);
343 card->wandev.state = state;
344 }
345
346 card->state_tick = jiffies;
347 spin_unlock_irqrestore(&card->lock, flags);
348}
349
350module_init(cycx_init);
351module_exit(cycx_exit);
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
new file mode 100644
index 000000000000..5b48cd8568f5
--- /dev/null
+++ b/drivers/net/wan/cycx_x25.c
@@ -0,0 +1,1609 @@
1/*
2* cycx_x25.c Cyclom 2X WAN Link Driver. X.25 module.
3*
4* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5*
6* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
7*
8* Based on sdla_x25.c by Gene Kozin <genek@compuserve.com>
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License
12* as published by the Free Software Foundation; either version
13* 2 of the License, or (at your option) any later version.
14* ============================================================================
15* 2001/01/12 acme use dev_kfree_skb_irq on interrupt context
16* 2000/04/02 acme dprintk, cycx_debug
17* fixed the bug introduced in get_dev_by_lcn and
18* get_dev_by_dte_addr by the anonymous hacker
19* that converted this driver to softnet
20* 2000/01/08 acme cleanup
21* 1999/10/27 acme use ARPHRD_HWX25 so that the X.25 stack knows
22* that we have an X.25 stack implemented in
23* firmware onboard
24* 1999/10/18 acme support for X.25 sockets in if_send,
25* beware: socket(AF_X25...) IS WORK IN PROGRESS,
26* TCP/IP over X.25 via wanrouter not affected,
27* working.
28* 1999/10/09 acme chan_disc renamed to chan_disconnect,
29* began adding support for X.25 sockets:
30* conf->protocol in new_if
31* 1999/10/05 acme fixed return E... to return -E...
32* 1999/08/10 acme serialized access to the card thru a spinlock
33* in x25_exec
34* 1999/08/09 acme removed per channel spinlocks
35* removed references to enable_tx_int
36* 1999/05/28 acme fixed nibble_to_byte, ackvc now properly treated
37* if_send simplified
38* 1999/05/25 acme fixed t1, t2, t21 & t23 configuration
39* use spinlocks instead of cli/sti in some points
40* 1999/05/24 acme finished the x25_get_stat function
41* 1999/05/23 acme dev->type = ARPHRD_X25 (tcpdump only works,
42* AFAIT, with ARPHRD_ETHER). This seems to be
43* needed to use socket(AF_X25)...
44* Now the config file must specify a peer media
45* address for svc channels over a crossover cable.
46* Removed hold_timeout from x25_channel_t,
47* not used.
48* A little enhancement in the DEBUG processing
49* 1999/05/22 acme go to DISCONNECTED in disconnect_confirm_intr,
50* instead of chan_disc.
51* 1999/05/16 marcelo fixed timer initialization in SVCs
52* 1999/01/05 acme x25_configure now get (most of) all
53* parameters...
54* 1999/01/05 acme pktlen now (correctly) uses log2 (value
55* configured)
56* 1999/01/03 acme judicious use of data types (u8, u16, u32, etc)
57* 1999/01/03 acme cyx_isr: reset dpmbase to acknowledge
58* indication (interrupt from cyclom 2x)
59* 1999/01/02 acme cyx_isr: first hackings...
60* 1999/01/0203 acme when initializing an array don't give less
61* elements than declared...
62* example: char send_cmd[6] = "?\xFF\x10";
63* you'll gonna lose a couple hours, 'cause your
64* brain won't admit that there's an error in the
65* above declaration... the side effect is that
66* memset is put into the unresolved symbols
67* instead of using the inline memset functions...
68* 1999/01/02 acme began chan_connect, chan_send, x25_send
69* 1998/12/31 acme x25_configure
70* this code can be compiled as non module
71* 1998/12/27 acme code cleanup
72* IPX code wiped out! let's decrease code
73* complexity for now, remember: I'm learning! :)
74* bps_to_speed_code OK
75* 1998/12/26 acme Minimal debug code cleanup
76* 1998/08/08 acme Initial version.
77*/
78
79#define CYCLOMX_X25_DEBUG 1
80
81#include <linux/errno.h> /* return codes */
82#include <linux/if_arp.h> /* ARPHRD_HWX25 */
83#include <linux/kernel.h> /* printk(), and other useful stuff */
84#include <linux/module.h>
85#include <linux/string.h> /* inline memset(), etc. */
86#include <linux/slab.h> /* kmalloc(), kfree() */
87#include <linux/stddef.h> /* offsetof(), etc. */
88#include <linux/wanrouter.h> /* WAN router definitions */
89
90#include <asm/byteorder.h> /* htons(), etc. */
91
92#include <linux/cyclomx.h> /* Cyclom 2X common user API definitions */
93#include <linux/cycx_x25.h> /* X.25 firmware API definitions */
94
95#include <net/x25device.h>
96
97/* Defines & Macros */
98#define CYCX_X25_MAX_CMD_RETRY 5
99#define CYCX_X25_CHAN_MTU 2048 /* unfragmented logical channel MTU */
100
101/* Data Structures */
102/* This is an extension of 'struct net_device'; one is created for each
103   network interface to hold the rest of the X.25 channel-specific data. */
104struct cycx_x25_channel {
105 /* This member must be first. */
106 struct net_device *slave; /* WAN slave */
107
108 char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
109 char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */
110 char *local_addr; /* local media address, ASCIIZ -
111 svc thru crossover cable */
112 s16 lcn; /* logical channel number/conn.req.key*/
113 u8 link;
114 struct timer_list timer; /* timer used for svc channel disc. */
115 u16 protocol; /* ethertype, 0 - multiplexed */
116 u8 svc; /* 0 - permanent, 1 - switched */
117 u8 state; /* channel state */
118 u8 drop_sequence; /* mark sequence for dropping */
119 u32 idle_tmout; /* sec, before disconnecting */
120 struct sk_buff *rx_skb; /* receive socket buffer */
121 struct cycx_device *card; /* -> owner */
122 struct net_device_stats ifstats;/* interface statistics */
123};
124
125/* Function Prototypes */
126/* WAN link driver entry points. These are called by the WAN router module. */
127static int cycx_wan_update(struct wan_device *wandev),
128 cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
129 wanif_conf_t *conf),
130 cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev);
131
132/* Network device interface */
133static int cycx_netdevice_init(struct net_device *dev),
134 cycx_netdevice_open(struct net_device *dev),
135 cycx_netdevice_stop(struct net_device *dev),
136 cycx_netdevice_hard_header(struct sk_buff *skb,
137 struct net_device *dev, u16 type,
138 void *daddr, void *saddr, unsigned len),
139 cycx_netdevice_rebuild_header(struct sk_buff *skb),
140 cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
141 struct net_device *dev);
142
143static struct net_device_stats *
144 cycx_netdevice_get_stats(struct net_device *dev);
145
146/* Interrupt handlers */
147static void cycx_x25_irq_handler(struct cycx_device *card),
148 cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
149 cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd),
150 cycx_x25_irq_log(struct cycx_device *card,
151 struct cycx_x25_cmd *cmd),
152 cycx_x25_irq_stat(struct cycx_device *card,
153 struct cycx_x25_cmd *cmd),
154 cycx_x25_irq_connect_confirm(struct cycx_device *card,
155 struct cycx_x25_cmd *cmd),
156 cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
157 struct cycx_x25_cmd *cmd),
158 cycx_x25_irq_connect(struct cycx_device *card,
159 struct cycx_x25_cmd *cmd),
160 cycx_x25_irq_disconnect(struct cycx_device *card,
161 struct cycx_x25_cmd *cmd),
162 cycx_x25_irq_spurious(struct cycx_device *card,
163 struct cycx_x25_cmd *cmd);
164
165/* X.25 firmware interface functions */
166static int cycx_x25_configure(struct cycx_device *card,
167 struct cycx_x25_config *conf),
168 cycx_x25_get_stats(struct cycx_device *card),
169 cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
170 int len, void *buf),
171 cycx_x25_connect_response(struct cycx_device *card,
172 struct cycx_x25_channel *chan),
173 cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
174 u8 lcn);
175
176/* channel functions */
177static int cycx_x25_chan_connect(struct net_device *dev),
178 cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb);
179
180static void cycx_x25_chan_disconnect(struct net_device *dev),
181 cycx_x25_chan_send_event(struct net_device *dev, u8 event);
182
183/* Miscellaneous functions */
184static void cycx_x25_set_chan_state(struct net_device *dev, u8 state),
185 cycx_x25_chan_timer(unsigned long d);
186
187static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble),
188 reset_timer(struct net_device *dev);
189
190static u8 bps_to_speed_code(u32 bps);
191static u8 cycx_log2(u32 n);
192
193static unsigned dec_to_uint(u8 *str, int len);
194
195static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
196 s16 lcn);
197static struct net_device *
198 cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte);
199
200#ifdef CYCLOMX_X25_DEBUG
201static void hex_dump(char *msg, unsigned char *p, int len);
202static void cycx_x25_dump_config(struct cycx_x25_config *conf);
203static void cycx_x25_dump_stats(struct cycx_x25_stats *stats);
204static void cycx_x25_dump_devs(struct wan_device *wandev);
205#else
206#define hex_dump(msg, p, len)
207#define cycx_x25_dump_config(conf)
208#define cycx_x25_dump_stats(stats)
209#define cycx_x25_dump_devs(wandev)
210#endif
211/* Public Functions */
212
213/* X.25 Protocol Initialization routine.
214 *
215 * This routine is called by the main Cyclom 2X module during setup. At this
216 * point adapter is completely initialized and X.25 firmware is running.
217 * o configure adapter
218 * o initialize protocol-specific fields of the adapter data space.
219 *
220 * Return: 0 o.k.
221 * < 0 failure. */
222int cycx_x25_wan_init(struct cycx_device *card, wandev_conf_t *conf)
223{
224 struct cycx_x25_config cfg;
225
226 /* Verify configuration ID */
227 if (conf->config_id != WANCONFIG_X25) {
228 printk(KERN_INFO "%s: invalid configuration ID %u!\n",
229 card->devname, conf->config_id);
230 return -EINVAL;
231 }
232
233 /* Initialize protocol-specific fields */
234 card->mbox = card->hw.dpmbase + X25_MBOX_OFFS;
235 card->u.x.connection_keys = 0;
236 spin_lock_init(&card->u.x.lock);
237
238 /* Configure adapter. Here we set reasonable defaults, then parse
239 * device configuration structure and set configuration options.
240 * Most configuration options are verified and corrected (if
241 * necessary) since we can't rely on the adapter to do so and don't
242 * want it to fail either. */
243 memset(&cfg, 0, sizeof(cfg));
244 cfg.link = 0;
245 cfg.clock = conf->clocking == WANOPT_EXTERNAL ? 8 : 55;
246 cfg.speed = bps_to_speed_code(conf->bps);
247 cfg.n3win = 7;
248 cfg.n2win = 2;
249 cfg.n2 = 5;
250 cfg.nvc = 1;
251 cfg.npvc = 1;
252 cfg.flags = 0x02; /* default = V35 */
253 cfg.t1 = 10; /* line carrier timeout */
254 cfg.t2 = 29; /* tx timeout */
255 cfg.t21 = 180; /* CALL timeout */
256 cfg.t23 = 180; /* CLEAR timeout */
257
258 /* adjust MTU */
259 if (!conf->mtu || conf->mtu >= 512)
260 card->wandev.mtu = 512;
261 else if (conf->mtu >= 256)
262 card->wandev.mtu = 256;
263 else if (conf->mtu >= 128)
264 card->wandev.mtu = 128;
265 else
266 card->wandev.mtu = 64;
267
268 cfg.pktlen = cycx_log2(card->wandev.mtu);
269
270 if (conf->station == WANOPT_DTE) {
271 cfg.locaddr = 3; /* DTE */
272 cfg.remaddr = 1; /* DCE */
273 } else {
274 cfg.locaddr = 1; /* DCE */
275 cfg.remaddr = 3; /* DTE */
276 }
277
278 if (conf->interface == WANOPT_RS232)
279 cfg.flags = 0; /* FIXME just reset the 2nd bit */
280
281 if (conf->u.x25.hi_pvc) {
282 card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, 4095);
283 card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
284 }
285
286 if (conf->u.x25.hi_svc) {
287 card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, 4095);
288 card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
289 }
290
291 if (card->u.x.lo_pvc == 255)
292 cfg.npvc = 0;
293 else
294 cfg.npvc = card->u.x.hi_pvc - card->u.x.lo_pvc + 1;
295
296 cfg.nvc = card->u.x.hi_svc - card->u.x.lo_svc + 1 + cfg.npvc;
297
298 if (conf->u.x25.hdlc_window)
299 cfg.n2win = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
300
301 if (conf->u.x25.pkt_window)
302 cfg.n3win = min_t(unsigned int, conf->u.x25.pkt_window, 7);
303
304 if (conf->u.x25.t1)
305 cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
306
307 if (conf->u.x25.t2)
308 cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 30);
309
310 if (conf->u.x25.t11_t21)
311 cfg.t21 = min_t(unsigned int, conf->u.x25.t11_t21, 30);
312
313 if (conf->u.x25.t13_t23)
314 cfg.t23 = min_t(unsigned int, conf->u.x25.t13_t23, 30);
315
316 if (conf->u.x25.n2)
317 cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
318
319 /* initialize adapter */
320 if (cycx_x25_configure(card, &cfg))
321 return -EIO;
322
323 /* Initialize protocol-specific fields of adapter data space */
324 card->wandev.bps = conf->bps;
325 card->wandev.interface = conf->interface;
326 card->wandev.clocking = conf->clocking;
327 card->wandev.station = conf->station;
328 card->isr = cycx_x25_irq_handler;
329 card->exec = NULL;
330 card->wandev.update = cycx_wan_update;
331 card->wandev.new_if = cycx_wan_new_if;
332 card->wandev.del_if = cycx_wan_del_if;
333 card->wandev.state = WAN_DISCONNECTED;
334
335 return 0;
336}
337
338/* WAN Device Driver Entry Points */
339/* Update device status & statistics. */
340static int cycx_wan_update(struct wan_device *wandev)
341{
342 /* sanity checks */
343 if (!wandev || !wandev->private)
344 return -EFAULT;
345
346 if (wandev->state == WAN_UNCONFIGURED)
347 return -ENODEV;
348
349 cycx_x25_get_stats(wandev->private);
350
351 return 0;
352}
353
354/* Create new logical channel.
355 * This routine is called by the router when ROUTER_IFNEW IOCTL is being
356 * handled.
357 * o parse media- and hardware-specific configuration
358 * o make sure that a new channel can be created
359 * o allocate resources, if necessary
360 * o prepare network device structure for registration.
361 *
362 * Return: 0 o.k.
363 * < 0 failure (channel will not be created) */
364static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
365 wanif_conf_t *conf)
366{
367 struct cycx_device *card = wandev->private;
368 struct cycx_x25_channel *chan;
369 int err = 0;
370
371 if (!conf->name[0] || strlen(conf->name) > WAN_IFNAME_SZ) {
372 printk(KERN_INFO "%s: invalid interface name!\n",
373 card->devname);
374 return -EINVAL;
375 }
376
377 /* allocate and initialize private data */
378 chan = kmalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL);
379 if (!chan)
380 return -ENOMEM;
381
382 memset(chan, 0, sizeof(*chan));
383 strcpy(chan->name, conf->name);
384 chan->card = card;
385 chan->link = conf->port;
386 chan->protocol = conf->protocol ? ETH_P_X25 : ETH_P_IP;
387 chan->rx_skb = NULL;
388 /* only used in svc connected thru crossover cable */
389 chan->local_addr = NULL;
390
391 if (conf->addr[0] == '@') { /* SVC */
392 int len = strlen(conf->local_addr);
393
394 if (len) {
395 if (len > WAN_ADDRESS_SZ) {
396 printk(KERN_ERR "%s: %s local addr too long!\n",
397 wandev->name, chan->name);
398 kfree(chan);
399 return -EINVAL;
400 } else {
401 chan->local_addr = kmalloc(len + 1, GFP_KERNEL);
402
403 if (!chan->local_addr) {
404 kfree(chan);
405 return -ENOMEM;
406 }
407 }
408
409 strncpy(chan->local_addr, conf->local_addr,
410 WAN_ADDRESS_SZ);
411 }
412
413 chan->svc = 1;
414 strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ);
415 init_timer(&chan->timer);
416 chan->timer.function = cycx_x25_chan_timer;
417 chan->timer.data = (unsigned long)dev;
418
419 /* Set channel timeouts (default if not specified) */
420 chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
421 } else if (is_digit(conf->addr[0])) { /* PVC */
422 s16 lcn = dec_to_uint(conf->addr, 0);
423
424 if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
425 chan->lcn = lcn;
426 else {
427 printk(KERN_ERR
428 "%s: PVC %u is out of range on interface %s!\n",
429 wandev->name, lcn, chan->name);
430 err = -EINVAL;
431 }
432 } else {
433 printk(KERN_ERR "%s: invalid media address on interface %s!\n",
434 wandev->name, chan->name);
435 err = -EINVAL;
436 }
437
438 if (err) {
439 if (chan->local_addr)
440 kfree(chan->local_addr);
441
442 kfree(chan);
443 return err;
444 }
445
446 /* prepare network device data space for registration */
447 strcpy(dev->name, chan->name);
448 dev->init = cycx_netdevice_init;
449 dev->priv = chan;
450
451 return 0;
452}
453
454/* Delete logical channel. */
455static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
456{
457 if (dev->priv) {
458 struct cycx_x25_channel *chan = dev->priv;
459
460 if (chan->svc) {
461 if (chan->local_addr)
462 kfree(chan->local_addr);
463
464 if (chan->state == WAN_CONNECTED)
465 del_timer(&chan->timer);
466 }
467
468 kfree(chan);
469 dev->priv = NULL;
470 }
471
472 return 0;
473}
474
475/* Network Device Interface */
476/* Initialize Linux network interface.
477 *
478 * This routine is called only once for each interface, during Linux network
479 * interface registration. Returning anything but zero will fail interface
480 * registration. */
481static int cycx_netdevice_init(struct net_device *dev)
482{
483 struct cycx_x25_channel *chan = dev->priv;
484 struct cycx_device *card = chan->card;
485 struct wan_device *wandev = &card->wandev;
486
487 /* Initialize device driver entry points */
488 dev->open = cycx_netdevice_open;
489 dev->stop = cycx_netdevice_stop;
490 dev->hard_header = cycx_netdevice_hard_header;
491 dev->rebuild_header = cycx_netdevice_rebuild_header;
492 dev->hard_start_xmit = cycx_netdevice_hard_start_xmit;
493 dev->get_stats = cycx_netdevice_get_stats;
494
495 /* Initialize media-specific parameters */
496 dev->mtu = CYCX_X25_CHAN_MTU;
497 dev->type = ARPHRD_HWX25; /* ARP h/w type */
498 dev->hard_header_len = 0; /* media header length */
499 dev->addr_len = 0; /* hardware address length */
500
501 if (!chan->svc)
502 *(u16*)dev->dev_addr = htons(chan->lcn);
503
504 /* Initialize hardware parameters (just for reference) */
505 dev->irq = wandev->irq;
506 dev->dma = wandev->dma;
507 dev->base_addr = wandev->ioport;
508 dev->mem_start = (unsigned long)wandev->maddr;
509 dev->mem_end = (unsigned long)(wandev->maddr +
510 wandev->msize - 1);
511 dev->flags |= IFF_NOARP;
512
513 /* Set transmit buffer queue length */
514 dev->tx_queue_len = 10;
515 SET_MODULE_OWNER(dev);
516
517 /* Initialize socket buffers */
518 cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
519
520 return 0;
521}
522
523/* Open network interface.
524 * o prevent module from unloading by incrementing use count
525 * o if link is disconnected then initiate connection
526 *
527 * Return 0 if O.k. or errno. */
528static int cycx_netdevice_open(struct net_device *dev)
529{
530 if (netif_running(dev))
531 return -EBUSY; /* only one open is allowed */
532
533 netif_start_queue(dev);
534 return 0;
535}
536
537/* Close network interface.
538 * o reset flags.
539 * o if there are no more open channels then disconnect the physical link. */
540static int cycx_netdevice_stop(struct net_device *dev)
541{
542 struct cycx_x25_channel *chan = dev->priv;
543
544 netif_stop_queue(dev);
545
546 if (chan->state == WAN_CONNECTED || chan->state == WAN_CONNECTING)
547 cycx_x25_chan_disconnect(dev);
548
549 return 0;
550}
551
552/* Build media header.
553 * o encapsulate packet according to encapsulation type.
554 *
555 * The trick here is to put packet type (Ethertype) into 'protocol' field of
556 * the socket buffer, so that we don't forget it. If encapsulation fails,
557 * set skb->protocol to 0 and discard packet later.
558 *
559 * Return: media header length. */
560static int cycx_netdevice_hard_header(struct sk_buff *skb,
561 struct net_device *dev, u16 type,
562 void *daddr, void *saddr, unsigned len)
563{
564 skb->protocol = type;
565
566 return dev->hard_header_len;
567}
568
569/* Re-build media header.
570 * Return: 1 physical address resolved.
571 * 0 physical address not resolved */
572static int cycx_netdevice_rebuild_header(struct sk_buff *skb)
573{
574 return 1;
575}
576
577/* Send a packet on a network interface.
578 * o set busy flag (marks start of the transmission).
579 * o check link state. If link is not up, then drop the packet.
580 * o check channel status. If it's down then initiate a call.
581 * o pass a packet to corresponding WAN device.
582 * o free socket buffer
583 *
584 * Return: 0 complete (socket buffer must be freed)
585 * non-0 packet may be re-transmitted (tbusy must be set)
586 *
587 * Notes:
588 * 1. This routine is called either by the protocol stack or by the "net
589 * bottom half" (with interrupts enabled).
590 * 2. Setting tbusy flag will inhibit further transmit requests from the
591 * protocol stack and can be used for flow control with protocol layer. */
592static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
593 struct net_device *dev)
594{
595 struct cycx_x25_channel *chan = dev->priv;
596 struct cycx_device *card = chan->card;
597
598 if (!chan->svc)
599 chan->protocol = skb->protocol;
600
601 if (card->wandev.state != WAN_CONNECTED)
602 ++chan->ifstats.tx_dropped;
603 else if (chan->svc && chan->protocol &&
604 chan->protocol != skb->protocol) {
605 printk(KERN_INFO
606 "%s: unsupported Ethertype 0x%04X on interface %s!\n",
607 card->devname, skb->protocol, dev->name);
608 ++chan->ifstats.tx_errors;
609 } else if (chan->protocol == ETH_P_IP) {
610 switch (chan->state) {
611 case WAN_DISCONNECTED:
612 if (cycx_x25_chan_connect(dev)) {
613 netif_stop_queue(dev);
614 return -EBUSY;
615 }
616 /* fall thru */
617 case WAN_CONNECTED:
618 reset_timer(dev);
619 dev->trans_start = jiffies;
620 netif_stop_queue(dev);
621
622 if (cycx_x25_chan_send(dev, skb))
623 return -EBUSY;
624
625 break;
626 default:
627 ++chan->ifstats.tx_dropped;
628 ++card->wandev.stats.tx_dropped;
629 }
630 } else { /* chan->protocol == ETH_P_X25 */
631 switch (skb->data[0]) {
632 case 0: break;
633 case 1: /* Connect request */
634 cycx_x25_chan_connect(dev);
635 goto free_packet;
636 case 2: /* Disconnect request */
637 cycx_x25_chan_disconnect(dev);
638 goto free_packet;
639 default:
640 printk(KERN_INFO
641 "%s: unknown %d x25-iface request on %s!\n",
642 card->devname, skb->data[0], dev->name);
643 ++chan->ifstats.tx_errors;
644 goto free_packet;
645 }
646
647 skb_pull(skb, 1); /* Remove control byte */
648 reset_timer(dev);
649 dev->trans_start = jiffies;
650 netif_stop_queue(dev);
651
652 if (cycx_x25_chan_send(dev, skb)) {
653 /* prepare for future retransmissions */
654 skb_push(skb, 1);
655 return -EBUSY;
656 }
657 }
658
659free_packet:
660 dev_kfree_skb(skb);
661
662 return 0;
663}
664
665/* Get Ethernet-style interface statistics.
666 * Return a pointer to struct net_device_stats */
667static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev)
668{
669 struct cycx_x25_channel *chan = dev->priv;
670
671 return chan ? &chan->ifstats : NULL;
672}
673
674/* Interrupt Handlers */
675/* X.25 Interrupt Service Routine. */
676static void cycx_x25_irq_handler(struct cycx_device *card)
677{
678 struct cycx_x25_cmd cmd;
679 u16 z = 0;
680
681 card->in_isr = 1;
682 card->buff_int_mode_unbusy = 0;
683 cycx_peek(&card->hw, X25_RXMBOX_OFFS, &cmd, sizeof(cmd));
684
685 switch (cmd.command) {
686 case X25_DATA_INDICATION:
687 cycx_x25_irq_rx(card, &cmd);
688 break;
689 case X25_ACK_FROM_VC:
690 cycx_x25_irq_tx(card, &cmd);
691 break;
692 case X25_LOG:
693 cycx_x25_irq_log(card, &cmd);
694 break;
695 case X25_STATISTIC:
696 cycx_x25_irq_stat(card, &cmd);
697 break;
698 case X25_CONNECT_CONFIRM:
699 cycx_x25_irq_connect_confirm(card, &cmd);
700 break;
701 case X25_CONNECT_INDICATION:
702 cycx_x25_irq_connect(card, &cmd);
703 break;
704 case X25_DISCONNECT_INDICATION:
705 cycx_x25_irq_disconnect(card, &cmd);
706 break;
707 case X25_DISCONNECT_CONFIRM:
708 cycx_x25_irq_disconnect_confirm(card, &cmd);
709 break;
710 case X25_LINE_ON:
711 cycx_set_state(card, WAN_CONNECTED);
712 break;
713 case X25_LINE_OFF:
714 cycx_set_state(card, WAN_DISCONNECTED);
715 break;
716 default:
717 cycx_x25_irq_spurious(card, &cmd);
718 break;
719 }
720
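	/* acknowledge the indication: zero the word at the DPM base and the
	 * RX mailbox command word (see the 1999/01/03 changelog entry) */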
721 cycx_poke(&card->hw, 0, &z, sizeof(z));
722 cycx_poke(&card->hw, X25_RXMBOX_OFFS, &z, sizeof(z));
723 card->in_isr = 0;
724}
725
726/* Transmit interrupt handler.
727 * o Release socket buffer
728 * o Clear 'tbusy' flag */
729static void cycx_x25_irq_tx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
730{
731 struct net_device *dev;
732 struct wan_device *wandev = &card->wandev;
733 u8 lcn;
734
735 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
736
737 /* unbusy device and then dev_tint(); */
738 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
739 if (dev) {
740 card->buff_int_mode_unbusy = 1;
741 netif_wake_queue(dev);
742 } else
743		printk(KERN_ERR "%s: ackvc for nonexistent lcn %d\n",
744 card->devname, lcn);
745}
746
747/* Receive interrupt handler.
748 * This routine handles fragmented IP packets using M-bit according to the
749 * RFC1356.
750 * o map logical channel number to network interface.
751 * o allocate socket buffer or append received packet to the existing one.
752 * o if M-bit is reset (i.e. it's the last packet in a sequence) then
753 * decapsulate packet and pass socket buffer to the protocol stack.
754 *
755 * Notes:
756 * 1. When allocating a socket buffer, if M-bit is set then more data is
757 * coming and we have to allocate buffer for the maximum IP packet size
758 * expected on this channel.
759 * 2. If something goes wrong and X.25 packet has to be dropped (e.g. no
760 * socket buffers available) the whole packet sequence must be discarded. */
761static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
762{
763 struct wan_device *wandev = &card->wandev;
764 struct net_device *dev;
765 struct cycx_x25_channel *chan;
766 struct sk_buff *skb;
767 u8 bitm, lcn;
768 int pktlen = cmd->len - 5;
769
770 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
771 cycx_peek(&card->hw, cmd->buf + 4, &bitm, sizeof(bitm));
772 bitm &= 0x10;
773
774 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
775 if (!dev) {
776 /* Invalid channel, discard packet */
777 printk(KERN_INFO "%s: receiving on orphaned LCN %d!\n",
778 card->devname, lcn);
779 return;
780 }
781
782 chan = dev->priv;
783 reset_timer(dev);
784
785 if (chan->drop_sequence) {
786 if (!bitm)
787 chan->drop_sequence = 0;
788 else
789 return;
790 }
791
792 if ((skb = chan->rx_skb) == NULL) {
793 /* Allocate new socket buffer */
794 int bufsize = bitm ? dev->mtu : pktlen;
795
796 if ((skb = dev_alloc_skb((chan->protocol == ETH_P_X25 ? 1 : 0) +
797 bufsize +
798 dev->hard_header_len)) == NULL) {
799 printk(KERN_INFO "%s: no socket buffers available!\n",
800 card->devname);
801 chan->drop_sequence = 1;
802 ++chan->ifstats.rx_dropped;
803 return;
804 }
805
806 if (chan->protocol == ETH_P_X25) /* X.25 socket layer control */
807 /* 0 = data packet (dev_alloc_skb zeroed skb->data) */
808 skb_put(skb, 1);
809
810 skb->dev = dev;
811 skb->protocol = htons(chan->protocol);
812 chan->rx_skb = skb;
813 }
814
815 if (skb_tailroom(skb) < pktlen) {
816 /* No room for the packet. Call off the whole thing! */
817 dev_kfree_skb_irq(skb);
818 chan->rx_skb = NULL;
819
820 if (bitm)
821 chan->drop_sequence = 1;
822
823 printk(KERN_INFO "%s: unexpectedly long packet sequence "
824 "on interface %s!\n", card->devname, dev->name);
825 ++chan->ifstats.rx_length_errors;
826 return;
827 }
828
829 /* Append packet to the socket buffer */
830 cycx_peek(&card->hw, cmd->buf + 5, skb_put(skb, pktlen), pktlen);
831
832 if (bitm)
833 return; /* more data is coming */
834
835 chan->rx_skb = NULL; /* dequeue packet */
836
837 ++chan->ifstats.rx_packets;
838 chan->ifstats.rx_bytes += pktlen;
839
840 skb->mac.raw = skb->data;
841 netif_rx(skb);
842 dev->last_rx = jiffies; /* timestamp */
843}
844
845/* Connect interrupt handler. */
846static void cycx_x25_irq_connect(struct cycx_device *card,
847 struct cycx_x25_cmd *cmd)
848{
849 struct wan_device *wandev = &card->wandev;
850 struct net_device *dev = NULL;
851 struct cycx_x25_channel *chan;
852 u8 d[32],
853 loc[24],
854 rem[24];
855 u8 lcn, sizeloc, sizerem;
856
857 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
858 cycx_peek(&card->hw, cmd->buf + 5, &sizeloc, sizeof(sizeloc));
859 cycx_peek(&card->hw, cmd->buf + 6, d, cmd->len - 6);
860
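	/* the length byte packs the remote DTE address length in its high
	 * nibble and the local address length in its low nibble; the digits
	 * themselves apparently follow packed as nibbles in d[] (see
	 * nibble_to_byte()). */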
861 sizerem = sizeloc >> 4;
862 sizeloc &= 0x0F;
863
864 loc[0] = rem[0] = '\0';
865
866 if (sizeloc)
867 nibble_to_byte(d, loc, sizeloc, 0);
868
869 if (sizerem)
870 nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1);
871
872 dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n",
873 __FUNCTION__, lcn, loc, rem);
874
875 dev = cycx_x25_get_dev_by_dte_addr(wandev, rem);
876 if (!dev) {
877 /* Invalid channel, discard packet */
878 printk(KERN_INFO "%s: connect not expected: remote %s!\n",
879 card->devname, rem);
880 return;
881 }
882
883 chan = dev->priv;
884 chan->lcn = lcn;
885 cycx_x25_connect_response(card, chan);
886 cycx_x25_set_chan_state(dev, WAN_CONNECTED);
887}
888
889/* Connect confirm interrupt handler. */
890static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
891 struct cycx_x25_cmd *cmd)
892{
893 struct wan_device *wandev = &card->wandev;
894 struct net_device *dev;
895 struct cycx_x25_channel *chan;
896 u8 lcn, key;
897
898 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
899 cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key));
900 dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n",
901 card->devname, __FUNCTION__, lcn, key);
902
903 dev = cycx_x25_get_dev_by_lcn(wandev, -key);
904 if (!dev) {
905 /* Invalid channel, discard packet */
906 clear_bit(--key, (void*)&card->u.x.connection_keys);
907 printk(KERN_INFO "%s: connect confirm not expected: lcn %d, "
908 "key=%d!\n", card->devname, lcn, key);
909 return;
910 }
911
912 clear_bit(--key, (void*)&card->u.x.connection_keys);
913 chan = dev->priv;
914 chan->lcn = lcn;
915 cycx_x25_set_chan_state(dev, WAN_CONNECTED);
916}
917
918/* Disconnect confirm interrupt handler. */
919static void cycx_x25_irq_disconnect_confirm(struct cycx_device *card,
920 struct cycx_x25_cmd *cmd)
921{
922 struct wan_device *wandev = &card->wandev;
923 struct net_device *dev;
924 u8 lcn;
925
926 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
927 dprintk(1, KERN_INFO "%s: %s:lcn=%d\n",
928 card->devname, __FUNCTION__, lcn);
929 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
930 if (!dev) {
931 /* Invalid channel, discard packet */
932 printk(KERN_INFO "%s:disconnect confirm not expected!:lcn %d\n",
933 card->devname, lcn);
934 return;
935 }
936
937 cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
938}
939
940/* disconnect interrupt handler. */
941static void cycx_x25_irq_disconnect(struct cycx_device *card,
942 struct cycx_x25_cmd *cmd)
943{
944 struct wan_device *wandev = &card->wandev;
945 struct net_device *dev;
946 u8 lcn;
947
948 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn));
949 dprintk(1, KERN_INFO "%s:lcn=%d\n", __FUNCTION__, lcn);
950
951 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
952 if (dev) {
953 struct cycx_x25_channel *chan = dev->priv;
954
955 cycx_x25_disconnect_response(card, chan->link, lcn);
956 cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
957 } else
958 cycx_x25_disconnect_response(card, 0, lcn);
959}
960
961/* LOG interrupt handler. */
962static void cycx_x25_irq_log(struct cycx_device *card, struct cycx_x25_cmd *cmd)
963{
964#if CYCLOMX_X25_DEBUG
965 char bf[20];
966 u16 size, toread, link, msg_code;
967 u8 code, routine;
968
969 cycx_peek(&card->hw, cmd->buf, &msg_code, sizeof(msg_code));
970 cycx_peek(&card->hw, cmd->buf + 2, &link, sizeof(link));
971 cycx_peek(&card->hw, cmd->buf + 4, &size, sizeof(size));
972 /* at most 20 bytes are available... thanks to Daniela :) */
973 toread = size < 20 ? size : 20;
974 cycx_peek(&card->hw, cmd->buf + 10, &bf, toread);
975 cycx_peek(&card->hw, cmd->buf + 10 + toread, &code, 1);
976 cycx_peek(&card->hw, cmd->buf + 10 + toread + 1, &routine, 1);
977
978 printk(KERN_INFO "cycx_x25_irq_handler: X25_LOG (0x4500) indic.:\n");
979 printk(KERN_INFO "cmd->buf=0x%X\n", cmd->buf);
980 printk(KERN_INFO "Log message code=0x%X\n", msg_code);
981 printk(KERN_INFO "Link=%d\n", link);
982 printk(KERN_INFO "log code=0x%X\n", code);
983 printk(KERN_INFO "log routine=0x%X\n", routine);
984 printk(KERN_INFO "Message size=%d\n", size);
985 hex_dump("Message", bf, toread);
986#endif
987}
988
989/* STATISTIC interrupt handler. */
990static void cycx_x25_irq_stat(struct cycx_device *card,
991 struct cycx_x25_cmd *cmd)
992{
993 cycx_peek(&card->hw, cmd->buf, &card->u.x.stats,
994 sizeof(card->u.x.stats));
995 hex_dump("cycx_x25_irq_stat", (unsigned char*)&card->u.x.stats,
996 sizeof(card->u.x.stats));
997 cycx_x25_dump_stats(&card->u.x.stats);
998 wake_up_interruptible(&card->wait_stats);
999}
1000
1001/* Spurious interrupt handler.
 1002 * o print a warning
 1003 * If the number of spurious interrupts exceeds some limit, then ??? */
1004static void cycx_x25_irq_spurious(struct cycx_device *card,
1005 struct cycx_x25_cmd *cmd)
1006{
1007 printk(KERN_INFO "%s: spurious interrupt (0x%X)!\n",
1008 card->devname, cmd->command);
1009}
1010#ifdef CYCLOMX_X25_DEBUG
1011static void hex_dump(char *msg, unsigned char *p, int len)
1012{
1013 unsigned char hex[1024],
1014 * phex = hex;
1015
1016 if (len >= (sizeof(hex) / 2))
1017 len = (sizeof(hex) / 2) - 1;
1018
1019 while (len--) {
1020 sprintf(phex, "%02x", *p++);
1021 phex += 2;
1022 }
1023
1024 printk(KERN_INFO "%s: %s\n", msg, hex);
1025}
1026#endif
1027
1028/* Cyclom 2X Firmware-Specific Functions */
1029/* Exec X.25 command. */
1030static int x25_exec(struct cycx_device *card, int command, int link,
1031 void *d1, int len1, void *d2, int len2)
1032{
1033 struct cycx_x25_cmd c;
1034 unsigned long flags;
1035 u32 addr = 0x1200 + 0x2E0 * link + 0x1E2;
1036 u8 retry = CYCX_X25_MAX_CMD_RETRY;
1037 int err = 0;
1038
1039 c.command = command;
1040 c.link = link;
1041 c.len = len1 + len2;
1042
1043 spin_lock_irqsave(&card->u.x.lock, flags);
1044
1045 /* write command */
1046 cycx_poke(&card->hw, X25_MBOX_OFFS, &c, sizeof(c) - sizeof(c.buf));
1047
1048 /* write X.25 data */
1049 if (d1) {
1050 cycx_poke(&card->hw, addr, d1, len1);
1051
1052 if (d2) {
1053 if (len2 > 254) {
1054 u32 addr1 = 0xA00 + 0x400 * link;
1055
1056 cycx_poke(&card->hw, addr + len1, d2, 249);
1057 cycx_poke(&card->hw, addr1, ((u8*)d2) + 249,
1058 len2 - 249);
1059 } else
1060 cycx_poke(&card->hw, addr + len1, d2, len2);
1061 }
1062 }
1063
1064 /* generate interruption, executing command */
1065 cycx_intr(&card->hw);
1066
1067 /* wait till card->mbox == 0 */
1068 do {
1069 err = cycx_exec(card->mbox);
1070 } while (retry-- && err);
1071
1072 spin_unlock_irqrestore(&card->u.x.lock, flags);
1073
1074 return err;
1075}
1076
1077/* Configure adapter. */
1078static int cycx_x25_configure(struct cycx_device *card,
1079 struct cycx_x25_config *conf)
1080{
1081 struct {
1082 u16 nlinks;
1083 struct cycx_x25_config conf[2];
1084 } x25_cmd_conf;
1085
1086 memset(&x25_cmd_conf, 0, sizeof(x25_cmd_conf));
1087 x25_cmd_conf.nlinks = 2;
1088 x25_cmd_conf.conf[0] = *conf;
 1089	/* FIXME: we need to find a way in the wanrouter framework
 1090	to configure the second link; for now let's use it
 1091	with the same config as the first link, fixing
 1092	the interface type to RS232, the speed to 38400 and
 1093	the clock to external */
1094 x25_cmd_conf.conf[1] = *conf;
1095 x25_cmd_conf.conf[1].link = 1;
1096 x25_cmd_conf.conf[1].speed = 5; /* 38400 */
1097 x25_cmd_conf.conf[1].clock = 8;
1098 x25_cmd_conf.conf[1].flags = 0; /* default = RS232 */
1099
1100 cycx_x25_dump_config(&x25_cmd_conf.conf[0]);
1101 cycx_x25_dump_config(&x25_cmd_conf.conf[1]);
1102
1103 return x25_exec(card, X25_CONFIG, 0,
1104 &x25_cmd_conf, sizeof(x25_cmd_conf), NULL, 0);
1105}
1106
1107/* Get protocol statistics. */
1108static int cycx_x25_get_stats(struct cycx_device *card)
1109{
1110 /* the firmware expects 20 in the size field!!!
1111 thanks to Daniela */
1112 int err = x25_exec(card, X25_STATISTIC, 0, NULL, 20, NULL, 0);
1113
1114 if (err)
1115 return err;
1116
1117 interruptible_sleep_on(&card->wait_stats);
1118
1119 if (signal_pending(current))
1120 return -EINTR;
1121
1122 card->wandev.stats.rx_packets = card->u.x.stats.n2_rx_frames;
1123 card->wandev.stats.rx_over_errors = card->u.x.stats.rx_over_errors;
1124 card->wandev.stats.rx_crc_errors = card->u.x.stats.rx_crc_errors;
1125 card->wandev.stats.rx_length_errors = 0; /* not available from fw */
1126 card->wandev.stats.rx_frame_errors = 0; /* not available from fw */
1127 card->wandev.stats.rx_missed_errors = card->u.x.stats.rx_aborts;
1128 card->wandev.stats.rx_dropped = 0; /* not available from fw */
1129 card->wandev.stats.rx_errors = 0; /* not available from fw */
1130 card->wandev.stats.tx_packets = card->u.x.stats.n2_tx_frames;
1131 card->wandev.stats.tx_aborted_errors = card->u.x.stats.tx_aborts;
1132 card->wandev.stats.tx_dropped = 0; /* not available from fw */
1133 card->wandev.stats.collisions = 0; /* not available from fw */
1134 card->wandev.stats.tx_errors = 0; /* not available from fw */
1135
1136 cycx_x25_dump_devs(&card->wandev);
1137
1138 return 0;
1139}
1140
1141/* Pack a decimal ASCII string into nibbles (two digits per byte);
1142 * returns the number of bytes written to 'd'. */
1142static int byte_to_nibble(u8 *s, u8 *d, char *nibble)
1143{
1144 int i = 0;
1145
1146 if (*nibble && *s) {
1147 d[i] |= *s++ - '0';
1148 *nibble = 0;
1149 ++i;
1150 }
1151
1152 while (*s) {
1153 d[i] = (*s - '0') << 4;
1154 if (*(s + 1))
1155 d[i] |= *(s + 1) - '0';
1156 else {
1157 *nibble = 1;
1158 break;
1159 }
1160 ++i;
1161 s += 2;
1162 }
1163
1164 return i;
1165}
1166
1167static void nibble_to_byte(u8 *s, u8 *d, u8 len, u8 nibble)
1168{
1169 if (nibble) {
1170 *d++ = '0' + (*s++ & 0x0F);
1171 --len;
1172 }
1173
1174 while (len) {
1175 *d++ = '0' + (*s >> 4);
1176
1177 if (--len) {
1178 *d++ = '0' + (*s & 0x0F);
1179 --len;
1180 } else break;
1181
1182 ++s;
1183 }
1184
1185 *d = '\0';
1186}
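
The two helpers above pack an ASCII X.25 DTE address into nibbles (two decimal digits per byte) and unpack it again; x25_place_call() below uses them to build the called/calling address field, carrying a pending half-byte between the two addresses via the 'nibble' flag. A rough user-space sketch of the packing step only (illustrative, ignoring the cross-call nibble carry; not driver code):

#include <stdio.h>

/* Minimal restatement of the packing loop: each pair of ASCII digits becomes
 * one byte, high nibble first; an odd trailing digit leaves a half-filled
 * byte behind. Returns the number of completed bytes. */
static int pack_nibbles(const char *s, unsigned char *d)
{
	int i = 0;

	while (*s) {
		d[i] = (*s - '0') << 4;
		if (*(s + 1))
			d[i] |= *(s + 1) - '0';
		else
			break;			/* odd length: low nibble left pending */
		++i;
		s += 2;
	}
	return i;
}

int main(void)
{
	unsigned char out[8] = { 0 };
	int i, n = pack_nibbles("12345", out);

	for (i = 0; i <= n; i++)		/* prints: 12 34 50 */
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}
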
1187
1188/* Place X.25 call. */
1189static int x25_place_call(struct cycx_device *card,
1190 struct cycx_x25_channel *chan)
1191{
1192 int err = 0,
1193 len;
1194 char d[64],
1195 nibble = 0,
1196 mylen = chan->local_addr ? strlen(chan->local_addr) : 0,
1197 remotelen = strlen(chan->addr);
1198 u8 key;
1199
1200 if (card->u.x.connection_keys == ~0U) {
1201 printk(KERN_INFO "%s: too many simultaneous connection "
1202 "requests!\n", card->devname);
1203 return -EAGAIN;
1204 }
1205
1206 key = ffz(card->u.x.connection_keys);
1207 set_bit(key, (void*)&card->u.x.connection_keys);
1208 ++key;
1209 dprintk(1, KERN_INFO "%s:x25_place_call:key=%d\n", card->devname, key);
1210 memset(d, 0, sizeof(d));
1211 d[1] = key; /* user key */
1212 d[2] = 0x10;
1213 d[4] = 0x0B;
1214
1215 len = byte_to_nibble(chan->addr, d + 6, &nibble);
1216
1217 if (chan->local_addr)
1218 len += byte_to_nibble(chan->local_addr, d + 6 + len, &nibble);
1219
1220 if (nibble)
1221 ++len;
1222
1223 d[5] = mylen << 4 | remotelen;
1224 d[6 + len + 1] = 0xCC; /* TCP/IP over X.25, thanks to Daniela :) */
1225
1226 if ((err = x25_exec(card, X25_CONNECT_REQUEST, chan->link,
1227 &d, 7 + len + 1, NULL, 0)) != 0)
1228 clear_bit(--key, (void*)&card->u.x.connection_keys);
1229 else
1230 chan->lcn = -key;
1231
1232 return err;
1233}
1234
1235/* Place X.25 CONNECT RESPONSE. */
1236static int cycx_x25_connect_response(struct cycx_device *card,
1237 struct cycx_x25_channel *chan)
1238{
1239 u8 d[8];
1240
1241 memset(d, 0, sizeof(d));
1242 d[0] = d[3] = chan->lcn;
1243 d[2] = 0x10;
1244 d[4] = 0x0F;
1245 d[7] = 0xCC; /* TCP/IP over X.25, thanks Daniela */
1246
1247 return x25_exec(card, X25_CONNECT_RESPONSE, chan->link, &d, 8, NULL, 0);
1248}
1249
1250/* Place X.25 DISCONNECT RESPONSE. */
1251static int cycx_x25_disconnect_response(struct cycx_device *card, u8 link,
1252 u8 lcn)
1253{
1254 char d[5];
1255
1256 memset(d, 0, sizeof(d));
1257 d[0] = d[3] = lcn;
1258 d[2] = 0x10;
1259 d[4] = 0x17;
1260
1261 return x25_exec(card, X25_DISCONNECT_RESPONSE, link, &d, 5, NULL, 0);
1262}
1263
1264/* Clear X.25 call. */
1265static int x25_clear_call(struct cycx_device *card, u8 link, u8 lcn, u8 cause,
1266 u8 diagn)
1267{
1268 u8 d[7];
1269
1270 memset(d, 0, sizeof(d));
1271 d[0] = d[3] = lcn;
1272 d[2] = 0x10;
1273 d[4] = 0x13;
1274 d[5] = cause;
1275 d[6] = diagn;
1276
1277 return x25_exec(card, X25_DISCONNECT_REQUEST, link, d, 7, NULL, 0);
1278}
1279
1280/* Send X.25 data packet. */
1281static int cycx_x25_send(struct cycx_device *card, u8 link, u8 lcn, u8 bitm,
1282 int len, void *buf)
1283{
1284 u8 d[] = "?\xFF\x10??";
1285
1286 d[0] = d[3] = lcn;
1287 d[4] = bitm;
1288
1289 return x25_exec(card, X25_DATA_REQUEST, link, &d, 5, buf, len);
1290}
1291
1292/* Miscellaneous */
1293/* Find network device by its channel number. */
1294static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
1295 s16 lcn)
1296{
1297 struct net_device *dev = wandev->dev;
1298 struct cycx_x25_channel *chan;
1299
1300 while (dev) {
1301 chan = (struct cycx_x25_channel*)dev->priv;
1302
1303 if (chan->lcn == lcn)
1304 break;
1305 dev = chan->slave;
1306 }
1307 return dev;
1308}
1309
1310/* Find network device by its remote dte address. */
1311static struct net_device *
1312 cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte)
1313{
1314 struct net_device *dev = wandev->dev;
1315 struct cycx_x25_channel *chan;
1316
1317 while (dev) {
1318 chan = (struct cycx_x25_channel*)dev->priv;
1319
1320 if (!strcmp(chan->addr, dte))
1321 break;
1322 dev = chan->slave;
1323 }
1324 return dev;
1325}
1326
1327/* Initiate connection on the logical channel.
 1328 * o for PVCs we just get the channel configuration
 1329 * o for SVCs we place an X.25 call
1330 *
1331 * Return: 0 connected
1332 * >0 connection in progress
1333 * <0 failure */
1334static int cycx_x25_chan_connect(struct net_device *dev)
1335{
1336 struct cycx_x25_channel *chan = dev->priv;
1337 struct cycx_device *card = chan->card;
1338
1339 if (chan->svc) {
1340 if (!chan->addr[0])
1341 return -EINVAL; /* no destination address */
1342
1343 dprintk(1, KERN_INFO "%s: placing X.25 call to %s...\n",
1344 card->devname, chan->addr);
1345
1346 if (x25_place_call(card, chan))
1347 return -EIO;
1348
1349 cycx_x25_set_chan_state(dev, WAN_CONNECTING);
1350 return 1;
1351 } else
1352 cycx_x25_set_chan_state(dev, WAN_CONNECTED);
1353
1354 return 0;
1355}
1356
1357/* Disconnect logical channel.
1358 * o if SVC then clear X.25 call */
1359static void cycx_x25_chan_disconnect(struct net_device *dev)
1360{
1361 struct cycx_x25_channel *chan = dev->priv;
1362
1363 if (chan->svc) {
1364 x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0);
1365 cycx_x25_set_chan_state(dev, WAN_DISCONNECTING);
1366 } else
1367 cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
1368}
1369
1370/* Called by kernel timer */
1371static void cycx_x25_chan_timer(unsigned long d)
1372{
1373 struct net_device *dev = (struct net_device *)d;
1374 struct cycx_x25_channel *chan = dev->priv;
1375
1376 if (chan->state == WAN_CONNECTED)
1377 cycx_x25_chan_disconnect(dev);
1378 else
1379 printk(KERN_ERR "%s: %s for svc (%s) not connected!\n",
1380 chan->card->devname, __FUNCTION__, dev->name);
1381}
1382
1383/* Set logical channel state. */
1384static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
1385{
1386 struct cycx_x25_channel *chan = dev->priv;
1387 struct cycx_device *card = chan->card;
1388 unsigned long flags;
1389 char *string_state = NULL;
1390
1391 spin_lock_irqsave(&card->lock, flags);
1392
1393 if (chan->state != state) {
1394 if (chan->svc && chan->state == WAN_CONNECTED)
1395 del_timer(&chan->timer);
1396
1397 switch (state) {
1398 case WAN_CONNECTED:
1399 string_state = "connected!";
1400 *(u16*)dev->dev_addr = htons(chan->lcn);
1401 netif_wake_queue(dev);
1402 reset_timer(dev);
1403
1404 if (chan->protocol == ETH_P_X25)
1405 cycx_x25_chan_send_event(dev, 1);
1406
1407 break;
1408 case WAN_CONNECTING:
1409 string_state = "connecting...";
1410 break;
1411 case WAN_DISCONNECTING:
1412 string_state = "disconnecting...";
1413 break;
1414 case WAN_DISCONNECTED:
1415 string_state = "disconnected!";
1416
1417 if (chan->svc) {
1418 *(unsigned short*)dev->dev_addr = 0;
1419 chan->lcn = 0;
1420 }
1421
1422 if (chan->protocol == ETH_P_X25)
1423 cycx_x25_chan_send_event(dev, 2);
1424
1425 netif_wake_queue(dev);
1426 break;
1427 }
1428
1429 printk(KERN_INFO "%s: interface %s %s\n", card->devname,
1430 dev->name, string_state);
1431 chan->state = state;
1432 }
1433
1434 spin_unlock_irqrestore(&card->lock, flags);
1435}
1436
1437/* Send packet on a logical channel.
1438 * When this function is called, tx_skb field of the channel data space
1439 * points to the transmit socket buffer. When transmission is complete,
1440 * release socket buffer and reset 'tbusy' flag.
1441 *
1442 * Return: 0 - transmission complete
1443 * 1 - busy
1444 *
1445 * Notes:
1446 * 1. If packet length is greater than MTU for this channel, we'll fragment
1447 * the packet into 'complete sequence' using M-bit.
1448 * 2. When transmission is complete, an event notification should be issued
1449 * to the router. */
1450static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb)
1451{
1452 struct cycx_x25_channel *chan = dev->priv;
1453 struct cycx_device *card = chan->card;
1454 int bitm = 0; /* final packet */
1455 unsigned len = skb->len;
1456
1457 if (skb->len > card->wandev.mtu) {
1458 len = card->wandev.mtu;
1459 bitm = 0x10; /* set M-bit (more data) */
1460 }
1461
1462 if (cycx_x25_send(card, chan->link, chan->lcn, bitm, len, skb->data))
1463 return 1;
1464
1465 if (bitm) {
1466 skb_pull(skb, len);
1467 return 1;
1468 }
1469
1470 ++chan->ifstats.tx_packets;
1471 chan->ifstats.tx_bytes += len;
1472
1473 return 0;
1474}
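
As a concrete illustration of the M-bit rule described in the notes above, here is a hypothetical user-space model (not the driver's actual transmit path): every segment except the last is sent with bitm = 0x10, and the final segment clears it.

#include <stdio.h>

#define CHAN_MTU 4	/* illustrative stand-in for card->wandev.mtu */

static void model_send(int bitm, int len, const char *buf)
{
	printf("segment \"%.*s\"  M-bit=%s\n", len, buf, bitm ? "set" : "clear");
}

int main(void)
{
	const char *pkt = "ABCDEFGHIJ";
	int left = 10;

	while (left > CHAN_MTU) {	/* more data follows: set the M-bit */
		model_send(0x10, CHAN_MTU, pkt);
		pkt += CHAN_MTU;
		left -= CHAN_MTU;
	}
	model_send(0, left, pkt);	/* final segment of the complete sequence */
	return 0;
}
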
1475
1476/* Send event (connection, disconnection, etc) to X.25 socket layer */
1477
1478static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
1479{
1480 struct sk_buff *skb;
1481 unsigned char *ptr;
1482
1483 if ((skb = dev_alloc_skb(1)) == NULL) {
1484 printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
1485 return;
1486 }
1487
1488 ptr = skb_put(skb, 1);
1489 *ptr = event;
1490
1491 skb->protocol = x25_type_trans(skb, dev);
1492 netif_rx(skb);
1493 dev->last_rx = jiffies; /* timestamp */
1494}
1495
1496/* Convert line speed in bps to a number used by cyclom 2x code. */
1497static u8 bps_to_speed_code(u32 bps)
1498{
1499 u8 number = 0; /* defaults to the lowest (1200) speed ;> */
1500
1501 if (bps >= 512000) number = 8;
1502 else if (bps >= 256000) number = 7;
1503 else if (bps >= 64000) number = 6;
1504 else if (bps >= 38400) number = 5;
1505 else if (bps >= 19200) number = 4;
1506 else if (bps >= 9600) number = 3;
1507 else if (bps >= 4800) number = 2;
1508 else if (bps >= 2400) number = 1;
1509
1510 return number;
1511}
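
For cross-reference, the hard-wired speed = 5 used for the second link in cycx_x25_configure() earlier in this file corresponds to 38400 bps in this mapping. A trivial user-space check of the same mapping (illustrative only, not driver code):

#include <stdio.h>
#include <assert.h>

/* Same mapping as bps_to_speed_code() above. */
static unsigned char speed_code(unsigned int bps)
{
	if (bps >= 512000) return 8;
	if (bps >= 256000) return 7;
	if (bps >= 64000)  return 6;
	if (bps >= 38400)  return 5;
	if (bps >= 19200)  return 4;
	if (bps >= 9600)   return 3;
	if (bps >= 4800)   return 2;
	if (bps >= 2400)   return 1;
	return 0;			/* lowest: 1200 bps */
}

int main(void)
{
	assert(speed_code(38400) == 5);	/* matches conf[1].speed = 5 above */
	assert(speed_code(9600) == 3);
	assert(speed_code(1200) == 0);
	printf("ok\n");
	return 0;
}
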
1512
1513/* Integer (floor) log base 2; returns 0 for n == 0. */
1514static u8 cycx_log2(u32 n)
1515{
1516 u8 log = 0;
1517
1518 if (!n)
1519 return 0;
1520
1521 while (n > 1) {
1522 n >>= 1;
1523 ++log;
1524 }
1525
1526 return log;
1527}
1528
1529/* Convert decimal string to unsigned integer.
1530 * If len != 0 then only 'len' characters of the string are converted. */
1531static unsigned dec_to_uint(u8 *str, int len)
1532{
1533 unsigned val = 0;
1534
1535 if (!len)
1536 len = strlen(str);
1537
1538 for (; len && is_digit(*str); ++str, --len)
1539 val = (val * 10) + (*str - (unsigned) '0');
1540
1541 return val;
1542}
1543
1544static void reset_timer(struct net_device *dev)
1545{
1546 struct cycx_x25_channel *chan = dev->priv;
1547
1548 if (chan->svc)
1549 mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ);
1550}
1551#ifdef CYCLOMX_X25_DEBUG
1552static void cycx_x25_dump_config(struct cycx_x25_config *conf)
1553{
1554 printk(KERN_INFO "X.25 configuration\n");
1555 printk(KERN_INFO "-----------------\n");
1556 printk(KERN_INFO "link number=%d\n", conf->link);
1557 printk(KERN_INFO "line speed=%d\n", conf->speed);
1558 printk(KERN_INFO "clock=%sternal\n", conf->clock == 8 ? "Ex" : "In");
1559 printk(KERN_INFO "# level 2 retransm.=%d\n", conf->n2);
1560 printk(KERN_INFO "level 2 window=%d\n", conf->n2win);
1561 printk(KERN_INFO "level 3 window=%d\n", conf->n3win);
1562 printk(KERN_INFO "# logical channels=%d\n", conf->nvc);
1563 printk(KERN_INFO "level 3 pkt len=%d\n", conf->pktlen);
1564 printk(KERN_INFO "my address=%d\n", conf->locaddr);
1565 printk(KERN_INFO "remote address=%d\n", conf->remaddr);
1566 printk(KERN_INFO "t1=%d seconds\n", conf->t1);
1567 printk(KERN_INFO "t2=%d seconds\n", conf->t2);
1568 printk(KERN_INFO "t21=%d seconds\n", conf->t21);
1569 printk(KERN_INFO "# PVCs=%d\n", conf->npvc);
1570 printk(KERN_INFO "t23=%d seconds\n", conf->t23);
1571 printk(KERN_INFO "flags=0x%x\n", conf->flags);
1572}
1573
1574static void cycx_x25_dump_stats(struct cycx_x25_stats *stats)
1575{
1576 printk(KERN_INFO "X.25 statistics\n");
1577 printk(KERN_INFO "--------------\n");
1578 printk(KERN_INFO "rx_crc_errors=%d\n", stats->rx_crc_errors);
1579 printk(KERN_INFO "rx_over_errors=%d\n", stats->rx_over_errors);
1580 printk(KERN_INFO "n2_tx_frames=%d\n", stats->n2_tx_frames);
1581 printk(KERN_INFO "n2_rx_frames=%d\n", stats->n2_rx_frames);
1582 printk(KERN_INFO "tx_timeouts=%d\n", stats->tx_timeouts);
1583 printk(KERN_INFO "rx_timeouts=%d\n", stats->rx_timeouts);
1584 printk(KERN_INFO "n3_tx_packets=%d\n", stats->n3_tx_packets);
1585 printk(KERN_INFO "n3_rx_packets=%d\n", stats->n3_rx_packets);
1586 printk(KERN_INFO "tx_aborts=%d\n", stats->tx_aborts);
1587 printk(KERN_INFO "rx_aborts=%d\n", stats->rx_aborts);
1588}
1589
1590static void cycx_x25_dump_devs(struct wan_device *wandev)
1591{
1592 struct net_device *dev = wandev->dev;
1593
1594 printk(KERN_INFO "X.25 dev states\n");
1595 printk(KERN_INFO "name: addr: txoff: protocol:\n");
1596 printk(KERN_INFO "---------------------------------------\n");
1597
1598 while(dev) {
1599 struct cycx_x25_channel *chan = dev->priv;
1600
1601 printk(KERN_INFO "%-5.5s %-15.15s %d ETH_P_%s\n",
1602 chan->name, chan->addr, netif_queue_stopped(dev),
1603 chan->protocol == ETH_P_IP ? "IP" : "X25");
1604 dev = chan->slave;
1605 }
1606}
1607
1608#endif /* CYCLOMX_X25_DEBUG */
1609/* End */
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
new file mode 100644
index 000000000000..6e1ec5bf22fc
--- /dev/null
+++ b/drivers/net/wan/dlci.c
@@ -0,0 +1,566 @@
1/*
2 * DLCI Implementation of Frame Relay protocol for Linux, according to
3 * RFC 1490. This generic device provides en/decapsulation for an
4 * underlying hardware driver. Routes & IPs are assigned to these
 5 *		interfaces. Requires the 'dlcicfg' program to create usable
 6 *		interfaces; the initial one, 'dlci', is for IOCTL use only.
7 *
8 * Version: @(#)dlci.c 0.35 4 Jan 1997
9 *
10 * Author: Mike McLagan <mike.mclagan@linux.org>
11 *
12 * Changes:
13 *
14 * 0.15 Mike Mclagan Packet freeing, bug in kmalloc call
15 * DLCI_RET handling
16 * 0.20 Mike McLagan More conservative on which packets
17 * are returned for retry and which are
18 * are dropped. If DLCI_RET_DROP is
19 * returned from the FRAD, the packet is
20 * sent back to Linux for re-transmission
21 * 0.25 Mike McLagan Converted to use SIOC IOCTL calls
22 * 0.30 Jim Freeman Fixed to allow IPX traffic
23 * 0.35 Michael Elizabeth Fixed incorrect memcpy_fromfs
24 *
25 * This program is free software; you can redistribute it and/or
26 * modify it under the terms of the GNU General Public License
27 * as published by the Free Software Foundation; either version
28 * 2 of the License, or (at your option) any later version.
29 */
30
31#include <linux/config.h> /* for CONFIG_DLCI_COUNT */
32#include <linux/module.h>
33#include <linux/kernel.h>
34#include <linux/types.h>
35#include <linux/fcntl.h>
36#include <linux/interrupt.h>
37#include <linux/ptrace.h>
38#include <linux/ioport.h>
39#include <linux/in.h>
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/string.h>
43#include <linux/errno.h>
44#include <linux/netdevice.h>
45#include <linux/skbuff.h>
46#include <linux/if_arp.h>
47#include <linux/if_frad.h>
48#include <linux/bitops.h>
49
50#include <net/sock.h>
51
52#include <asm/system.h>
53#include <asm/io.h>
54#include <asm/dma.h>
55#include <asm/uaccess.h>
56
57static const char version[] = "DLCI driver v0.35, 4 Jan 1997, mike.mclagan@linux.org";
58
59static LIST_HEAD(dlci_devs);
60
61static void dlci_setup(struct net_device *);
62
63/*
 64 * The routines below encapsulate the RFC 1490 requirements and
 65 * handle packet transmission and reception, working with
 66 * the upper network layers
67 */
68
69static int dlci_header(struct sk_buff *skb, struct net_device *dev,
70 unsigned short type, void *daddr, void *saddr,
71 unsigned len)
72{
73 struct frhdr hdr;
74 struct dlci_local *dlp;
75 unsigned int hlen;
76 char *dest;
77
78 dlp = dev->priv;
79
80 hdr.control = FRAD_I_UI;
81 switch(type)
82 {
83 case ETH_P_IP:
84 hdr.IP_NLPID = FRAD_P_IP;
85 hlen = sizeof(hdr.control) + sizeof(hdr.IP_NLPID);
86 break;
87
88 /* feel free to add other types, if necessary */
89
90 default:
91 hdr.pad = FRAD_P_PADDING;
92 hdr.NLPID = FRAD_P_SNAP;
93 memset(hdr.OUI, 0, sizeof(hdr.OUI));
94 hdr.PID = htons(type);
95 hlen = sizeof(hdr);
96 break;
97 }
98
99 dest = skb_push(skb, hlen);
100 if (!dest)
101 return(0);
102
103 memcpy(dest, &hdr, hlen);
104
105 return(hlen);
106}
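
For reference, dlci_header() above builds one of two RFC 1490 encapsulations: a two-byte form for IP (UI control followed by the IP NLPID) and an eight-byte SNAP form for any other EtherType. The sketch below is a user-space illustration using the standard RFC 1490 byte values (0x03 for UI, 0xCC for IP, 0x80 for SNAP); the driver takes FRAD_I_UI, FRAD_P_IP, FRAD_P_PADDING and FRAD_P_SNAP from <linux/if_frad.h>, assumed here to match those values.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* IP: control (UI) + NLPID only -- two bytes on the wire */
	unsigned char ip_hdr[2] = { 0x03, 0xCC };

	/* Any other EtherType, e.g. IPX (0x8137): control, pad, NLPID=SNAP,
	 * zero OUI, then the EtherType itself as the PID (network order) */
	unsigned char snap_hdr[8] = { 0x03, 0x00, 0x80, 0x00, 0x00, 0x00, 0, 0 };
	unsigned short pid = htons(0x8137);

	memcpy(snap_hdr + 6, &pid, sizeof(pid));

	printf("IP header: %zu bytes, SNAP header: %zu bytes\n",
	       sizeof(ip_hdr), sizeof(snap_hdr));
	return 0;
}
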
107
108static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
109{
110 struct dlci_local *dlp;
111 struct frhdr *hdr;
112 int process, header;
113
114 dlp = dev->priv;
115 if (!pskb_may_pull(skb, sizeof(*hdr))) {
116 printk(KERN_NOTICE "%s: invalid data no header\n",
117 dev->name);
118 dlp->stats.rx_errors++;
119 kfree_skb(skb);
120 return;
121 }
122
123 hdr = (struct frhdr *) skb->data;
124 process = 0;
125 header = 0;
126 skb->dev = dev;
127
128 if (hdr->control != FRAD_I_UI)
129 {
130 printk(KERN_NOTICE "%s: Invalid header flag 0x%02X.\n", dev->name, hdr->control);
131 dlp->stats.rx_errors++;
132 }
133 else
134 switch(hdr->IP_NLPID)
135 {
136 case FRAD_P_PADDING:
137 if (hdr->NLPID != FRAD_P_SNAP)
138 {
139 printk(KERN_NOTICE "%s: Unsupported NLPID 0x%02X.\n", dev->name, hdr->NLPID);
140 dlp->stats.rx_errors++;
141 break;
142 }
143
144 if (hdr->OUI[0] + hdr->OUI[1] + hdr->OUI[2] != 0)
145 {
146 printk(KERN_NOTICE "%s: Unsupported organizationally unique identifier 0x%02X-%02X-%02X.\n", dev->name, hdr->OUI[0], hdr->OUI[1], hdr->OUI[2]);
147 dlp->stats.rx_errors++;
148 break;
149 }
150
151 /* at this point, it's an EtherType frame */
152 header = sizeof(struct frhdr);
153 /* Already in network order ! */
154 skb->protocol = hdr->PID;
155 process = 1;
156 break;
157
158 case FRAD_P_IP:
159 header = sizeof(hdr->control) + sizeof(hdr->IP_NLPID);
160 skb->protocol = htons(ETH_P_IP);
161 process = 1;
162 break;
163
164 case FRAD_P_SNAP:
165 case FRAD_P_Q933:
166 case FRAD_P_CLNP:
167 printk(KERN_NOTICE "%s: Unsupported NLPID 0x%02X.\n", dev->name, hdr->pad);
168 dlp->stats.rx_errors++;
169 break;
170
171 default:
172 printk(KERN_NOTICE "%s: Invalid pad byte 0x%02X.\n", dev->name, hdr->pad);
173 dlp->stats.rx_errors++;
174 break;
175 }
176
177 if (process)
178 {
179 /* we've set up the protocol, so discard the header */
180 skb->mac.raw = skb->data;
181 skb_pull(skb, header);
182 dlp->stats.rx_bytes += skb->len;
183 netif_rx(skb);
184 dlp->stats.rx_packets++;
185 dev->last_rx = jiffies;
186 }
187 else
188 dev_kfree_skb(skb);
189}
190
191static int dlci_transmit(struct sk_buff *skb, struct net_device *dev)
192{
193 struct dlci_local *dlp;
194 int ret;
195
196 ret = 0;
197
198 if (!skb || !dev)
199 return(0);
200
201 dlp = dev->priv;
202
203 netif_stop_queue(dev);
204
205 ret = dlp->slave->hard_start_xmit(skb, dlp->slave);
206 switch (ret)
207 {
208 case DLCI_RET_OK:
209 dlp->stats.tx_packets++;
210 ret = 0;
211 break;
212 case DLCI_RET_ERR:
213 dlp->stats.tx_errors++;
214 ret = 0;
215 break;
216 case DLCI_RET_DROP:
217 dlp->stats.tx_dropped++;
218 ret = 1;
219 break;
220 }
221 /* Alan Cox recommends always returning 0, and always freeing the packet */
 222	/* experience suggests a slightly more conservative approach */
223
224 if (!ret)
225 {
226 dev_kfree_skb(skb);
227 netif_wake_queue(dev);
228 }
229 return(ret);
230}
231
232static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, int get)
233{
234 struct dlci_conf config;
235 struct dlci_local *dlp;
236 struct frad_local *flp;
237 int err;
238
239 dlp = dev->priv;
240
241 flp = dlp->slave->priv;
242
243 if (!get)
244 {
245 if(copy_from_user(&config, conf, sizeof(struct dlci_conf)))
246 return -EFAULT;
247 if (config.flags & ~DLCI_VALID_FLAGS)
248 return(-EINVAL);
249 memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
250 dlp->configured = 1;
251 }
252
253 err = (*flp->dlci_conf)(dlp->slave, dev, get);
254 if (err)
255 return(err);
256
257 if (get)
258 {
259 if(copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf)))
260 return -EFAULT;
261 }
262
263 return(0);
264}
265
266static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
267{
268 struct dlci_local *dlp;
269
270 if (!capable(CAP_NET_ADMIN))
271 return(-EPERM);
272
273 dlp = dev->priv;
274
275 switch(cmd)
276 {
277 case DLCI_GET_SLAVE:
278 if (!*(short *)(dev->dev_addr))
279 return(-EINVAL);
280
281 strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
282 break;
283
284 case DLCI_GET_CONF:
285 case DLCI_SET_CONF:
286 if (!*(short *)(dev->dev_addr))
287 return(-EINVAL);
288
289 return(dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF));
290 break;
291
292 default:
293 return(-EOPNOTSUPP);
294 }
295 return(0);
296}
297
298static int dlci_change_mtu(struct net_device *dev, int new_mtu)
299{
300 struct dlci_local *dlp;
301
302 dlp = dev->priv;
303
304 return((*dlp->slave->change_mtu)(dlp->slave, new_mtu));
305}
306
307static int dlci_open(struct net_device *dev)
308{
309 struct dlci_local *dlp;
310 struct frad_local *flp;
311 int err;
312
313 dlp = dev->priv;
314
315 if (!*(short *)(dev->dev_addr))
316 return(-EINVAL);
317
318 if (!netif_running(dlp->slave))
319 return(-ENOTCONN);
320
321 flp = dlp->slave->priv;
322 err = (*flp->activate)(dlp->slave, dev);
323 if (err)
324 return(err);
325
326 netif_start_queue(dev);
327
328 return 0;
329}
330
331static int dlci_close(struct net_device *dev)
332{
333 struct dlci_local *dlp;
334 struct frad_local *flp;
335 int err;
336
337 netif_stop_queue(dev);
338
339 dlp = dev->priv;
340
341 flp = dlp->slave->priv;
342 err = (*flp->deactivate)(dlp->slave, dev);
343
344 return 0;
345}
346
347static struct net_device_stats *dlci_get_stats(struct net_device *dev)
348{
349 struct dlci_local *dlp;
350
351 dlp = dev->priv;
352
353 return(&dlp->stats);
354}
355
356static int dlci_add(struct dlci_add *dlci)
357{
358 struct net_device *master, *slave;
359 struct dlci_local *dlp;
360 struct frad_local *flp;
361 int err = -EINVAL;
362
363
364 /* validate slave device */
365 slave = dev_get_by_name(dlci->devname);
366 if (!slave)
367 return -ENODEV;
368
369 if (slave->type != ARPHRD_FRAD || slave->priv == NULL)
370 goto err1;
371
372 /* create device name */
373 master = alloc_netdev( sizeof(struct dlci_local), "dlci%d",
374 dlci_setup);
375 if (!master) {
376 err = -ENOMEM;
377 goto err1;
378 }
379
380 /* make sure same slave not already registered */
381 rtnl_lock();
382 list_for_each_entry(dlp, &dlci_devs, list) {
383 if (dlp->slave == slave) {
384 err = -EBUSY;
385 goto err2;
386 }
387 }
388
389 err = dev_alloc_name(master, master->name);
390 if (err < 0)
391 goto err2;
392
393 *(short *)(master->dev_addr) = dlci->dlci;
394
395 dlp = (struct dlci_local *) master->priv;
396 dlp->slave = slave;
397 dlp->master = master;
398
399 flp = slave->priv;
400 err = (*flp->assoc)(slave, master);
401 if (err < 0)
402 goto err2;
403
404 err = register_netdevice(master);
405 if (err < 0)
406 goto err2;
407
408 strcpy(dlci->devname, master->name);
409
410 list_add(&dlp->list, &dlci_devs);
411 rtnl_unlock();
412
413 return(0);
414
415 err2:
416 rtnl_unlock();
417 free_netdev(master);
418 err1:
419 dev_put(slave);
420 return(err);
421}
422
423static int dlci_del(struct dlci_add *dlci)
424{
425 struct dlci_local *dlp;
426 struct frad_local *flp;
427 struct net_device *master, *slave;
428 int err;
429
430 /* validate slave device */
431 master = __dev_get_by_name(dlci->devname);
432 if (!master)
433 return(-ENODEV);
434
435 if (netif_running(master)) {
436 return(-EBUSY);
437 }
438
439 dlp = master->priv;
440 slave = dlp->slave;
441 flp = slave->priv;
442
443 rtnl_lock();
444 err = (*flp->deassoc)(slave, master);
445 if (!err) {
446 list_del(&dlp->list);
447
448 unregister_netdevice(master);
449
450 dev_put(slave);
451 }
452 rtnl_unlock();
453
454 return(err);
455}
456
457static int dlci_ioctl(unsigned int cmd, void __user *arg)
458{
459 struct dlci_add add;
460 int err;
461
462 if (!capable(CAP_NET_ADMIN))
463 return(-EPERM);
464
465 if(copy_from_user(&add, arg, sizeof(struct dlci_add)))
466 return -EFAULT;
467
468 switch (cmd)
469 {
470 case SIOCADDDLCI:
471 err = dlci_add(&add);
472
473 if (!err)
474 if(copy_to_user(arg, &add, sizeof(struct dlci_add)))
475 return -EFAULT;
476 break;
477
478 case SIOCDELDLCI:
479 err = dlci_del(&add);
480 break;
481
482 default:
483 err = -EINVAL;
484 }
485
486 return(err);
487}
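
As noted in the header comment, interfaces are created by a user-space configurator ('dlcicfg') through SIOCADDDLCI, which lands in dlci_ioctl() above. A minimal sketch of such a call from user space, assuming struct dlci_add and SIOCADDDLCI come from <linux/if_frad.h> and <linux/sockios.h> and that the ioctl may be issued on an ordinary socket (the real dlcicfg tool may differ):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>	/* SIOCADDDLCI (assumed location) */
#include <linux/if_frad.h>	/* struct dlci_add (assumed location) */

int main(void)
{
	struct dlci_add add;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket for the ioctl */

	if (fd < 0)
		return 1;

	memset(&add, 0, sizeof(add));
	strncpy(add.devname, "sdla0", sizeof(add.devname) - 1);	/* hypothetical FRAD device */
	add.dlci = 16;						/* DLCI number to attach */

	if (ioctl(fd, SIOCADDDLCI, &add) < 0) {
		perror("SIOCADDDLCI");
		close(fd);
		return 1;
	}
	/* on success dlci_add() writes the new interface name back, e.g. "dlci0" */
	printf("created %s\n", add.devname);
	close(fd);
	return 0;
}
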
488
489static void dlci_setup(struct net_device *dev)
490{
491 struct dlci_local *dlp = dev->priv;
492
493 dev->flags = 0;
494 dev->open = dlci_open;
495 dev->stop = dlci_close;
496 dev->do_ioctl = dlci_dev_ioctl;
497 dev->hard_start_xmit = dlci_transmit;
498 dev->hard_header = dlci_header;
499 dev->get_stats = dlci_get_stats;
500 dev->change_mtu = dlci_change_mtu;
501 dev->destructor = free_netdev;
502
503 dlp->receive = dlci_receive;
504
505 dev->type = ARPHRD_DLCI;
506 dev->hard_header_len = sizeof(struct frhdr);
507 dev->addr_len = sizeof(short);
508
509}
510
511/* if slave is unregistering, then cleanup master */
512static int dlci_dev_event(struct notifier_block *unused,
513 unsigned long event, void *ptr)
514{
515 struct net_device *dev = (struct net_device *) ptr;
516
517 if (event == NETDEV_UNREGISTER) {
518 struct dlci_local *dlp;
519
520 list_for_each_entry(dlp, &dlci_devs, list) {
521 if (dlp->slave == dev) {
522 list_del(&dlp->list);
523 unregister_netdevice(dlp->master);
524 dev_put(dlp->slave);
525 break;
526 }
527 }
528 }
529 return NOTIFY_DONE;
530}
531
532static struct notifier_block dlci_notifier = {
533 .notifier_call = dlci_dev_event,
534};
535
536static int __init init_dlci(void)
537{
538 dlci_ioctl_set(dlci_ioctl);
539 register_netdevice_notifier(&dlci_notifier);
540
541 printk("%s.\n", version);
542
543 return 0;
544}
545
546static void __exit dlci_exit(void)
547{
548 struct dlci_local *dlp, *nxt;
549
550 dlci_ioctl_set(NULL);
551 unregister_netdevice_notifier(&dlci_notifier);
552
553 rtnl_lock();
554 list_for_each_entry_safe(dlp, nxt, &dlci_devs, list) {
555 unregister_netdevice(dlp->master);
556 dev_put(dlp->slave);
557 }
558 rtnl_unlock();
559}
560
561module_init(init_dlci);
562module_exit(dlci_exit);
563
564MODULE_AUTHOR("Mike McLagan");
565MODULE_DESCRIPTION("Frame Relay DLCI layer");
566MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
new file mode 100644
index 000000000000..520a77a798e2
--- /dev/null
+++ b/drivers/net/wan/dscc4.c
@@ -0,0 +1,2074 @@
1/*
2 * drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux
3 *
4 * This software may be used and distributed according to the terms of the
5 * GNU General Public License.
6 *
7 * The author may be reached as romieu@cogenit.fr.
8 * Specific bug reports/asian food will be welcome.
9 *
10 * Special thanks to the nice people at CS-Telecom for the hardware and the
11 * access to the test/measure tools.
12 *
13 *
14 * Theory of Operation
15 *
16 * I. Board Compatibility
17 *
 18 * This device driver is designed for the Siemens PEB20534 four-port serial
19 * controller as found on Etinc PCISYNC cards. The documentation for the
20 * chipset is available at http://www.infineon.com:
21 * - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
22 * 4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
23 * - Application Hint "Management of DSCC4 on-chip FIFO resources".
24 * - Errata sheet DS5 (courtesy of Michael Skerritt).
25 * Jens David has built an adapter based on the same chipset. Take a look
26 * at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
27 * driver.
28 * Sample code (2 revisions) is available at Infineon.
29 *
30 * II. Board-specific settings
31 *
32 * Pcisync can transmit some clock signal to the outside world on the
33 * *first two* ports provided you put a quartz and a line driver on it and
34 * remove the jumpers. The operation is described on Etinc web site. If you
35 * go DCE on these ports, don't forget to use an adequate cable.
36 *
37 * Sharing of the PCI interrupt line for this board is possible.
38 *
39 * III. Driver operation
40 *
41 * The rx/tx operations are based on a linked list of descriptors. The driver
42 * doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more
 43 * I tried to fix it, the more it started to look like a (convoluted) software
 44 * mutation of the LxDA method. Errata sheet DS5 suggests using LxDA: consider
 45 * this an RFC 2119 MUST.
46 *
47 * Tx direction
48 * When the tx ring is full, the xmit routine issues a call to netdev_stop.
49 * The device is supposed to be enabled again during an ALLS irq (we could
50 * use HI but as it's easy to lose events, it's fscked).
51 *
52 * Rx direction
53 * The received frames aren't supposed to span over multiple receiving areas.
54 * I may implement it some day but it isn't the highest ranked item.
55 *
56 * IV. Notes
57 * The current error (XDU, RFO) recovery code is untested.
 58 * So far, RDO takes its RX channel down and the right sequence to enable it
 59 * again is still a mystery. If RDO happens, plan a reboot. More details
60 * in the code (NB: as this happens, TX still works).
 61 * Don't mess with the cables during operation, especially on DTE ports. I don't
62 * suggest it for DCE either but at least one can get some messages instead
63 * of a complete instant freeze.
 64 * Tests are done on Rev. 20 of the silicon. The RDO handling changes with
65 * the documentation/chipset releases.
66 *
67 * TODO:
68 * - test X25.
69 * - use polling at high irq/s,
70 * - performance analysis,
71 * - endianness.
72 *
73 * 2001/12/10 Daniela Squassoni <daniela@cyclades.com>
74 * - Contribution to support the new generic HDLC layer.
75 *
76 * 2002/01 Ueimor
77 * - old style interface removal
78 * - dscc4_release_ring fix (related to DMA mapping)
79 * - hard_start_xmit fix (hint: TxSizeMax)
80 * - misc crapectomy.
81 */
82
83#include <linux/module.h>
84#include <linux/types.h>
85#include <linux/errno.h>
86#include <linux/list.h>
87#include <linux/ioport.h>
88#include <linux/pci.h>
89#include <linux/kernel.h>
90#include <linux/mm.h>
91
92#include <asm/system.h>
93#include <asm/cache.h>
94#include <asm/byteorder.h>
95#include <asm/uaccess.h>
96#include <asm/io.h>
97#include <asm/irq.h>
98
99#include <linux/init.h>
100#include <linux/string.h>
101
102#include <linux/if_arp.h>
103#include <linux/netdevice.h>
104#include <linux/skbuff.h>
105#include <linux/delay.h>
106#include <net/syncppp.h>
107#include <linux/hdlc.h>
108
109/* Version */
110static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
111static int debug;
112static int quartz;
113
114#ifdef CONFIG_DSCC4_PCI_RST
115static DECLARE_MUTEX(dscc4_sem);
116static u32 dscc4_pci_config_store[16];
117#endif
118
119#define DRV_NAME "dscc4"
120
121#undef DSCC4_POLLING
122
123/* Module parameters */
124
125MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
126MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
127MODULE_LICENSE("GPL");
128module_param(debug, int, 0);
129MODULE_PARM_DESC(debug,"Enable/disable extra messages");
130module_param(quartz, int, 0);
131MODULE_PARM_DESC(quartz,"If present, on-board quartz frequency (Hz)");
132
133/* Structures */
134
135struct thingie {
136 int define;
137 u32 bits;
138};
139
140struct TxFD {
141 u32 state;
142 u32 next;
143 u32 data;
144 u32 complete;
145 u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
146};
147
148struct RxFD {
149 u32 state1;
150 u32 next;
151 u32 data;
152 u32 state2;
153 u32 end;
154};
155
156#define DUMMY_SKB_SIZE 64
157#define TX_LOW 8
158#define TX_RING_SIZE 32
159#define RX_RING_SIZE 32
160#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct TxFD)
161#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct RxFD)
162#define IRQ_RING_SIZE 64 /* Keep it a multiple of 32 */
163#define TX_TIMEOUT (HZ/10)
164#define DSCC4_HZ_MAX 33000000
165#define BRR_DIVIDER_MAX 64*0x00004000 /* Cf errata DS5 p.10 */
166#define dev_per_card 4
167#define SCC_REGISTERS_MAX 23 /* Cf errata DS5 p.4 */
168
169#define SOURCE_ID(flags) (((flags) >> 28) & 0x03)
170#define TO_SIZE(state) (((state) >> 16) & 0x1fff)
171
172/*
173 * Given the operating range of Linux HDLC, the 2 defines below could be
174 * made simpler. However they are a fine reminder for the limitations of
175 * the driver: it's better to stay < TxSizeMax and < RxSizeMax.
176 */
177#define TO_STATE_TX(len) cpu_to_le32(((len) & TxSizeMax) << 16)
178#define TO_STATE_RX(len) cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
179#define RX_MAX(len) ((((len) >> 5) + 1) << 5) /* Cf RLCR */
180#define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET)
181
182struct dscc4_pci_priv {
183 u32 *iqcfg;
184 int cfg_cur;
185 spinlock_t lock;
186 struct pci_dev *pdev;
187
188 struct dscc4_dev_priv *root;
189 dma_addr_t iqcfg_dma;
190 u32 xtal_hz;
191};
192
193struct dscc4_dev_priv {
194 struct sk_buff *rx_skbuff[RX_RING_SIZE];
195 struct sk_buff *tx_skbuff[TX_RING_SIZE];
196
197 struct RxFD *rx_fd;
198 struct TxFD *tx_fd;
199 u32 *iqrx;
200 u32 *iqtx;
201
202 /* FIXME: check all the volatile are required */
203 volatile u32 tx_current;
204 u32 rx_current;
205 u32 iqtx_current;
206 u32 iqrx_current;
207
208 volatile u32 tx_dirty;
209 volatile u32 ltda;
210 u32 rx_dirty;
211 u32 lrda;
212
213 dma_addr_t tx_fd_dma;
214 dma_addr_t rx_fd_dma;
215 dma_addr_t iqtx_dma;
216 dma_addr_t iqrx_dma;
217
218 u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */
219
220 struct timer_list timer;
221
222 struct dscc4_pci_priv *pci_priv;
223 spinlock_t lock;
224
225 int dev_id;
226 volatile u32 flags;
227 u32 timer_help;
228
229 unsigned short encoding;
230 unsigned short parity;
231 struct net_device *dev;
232 sync_serial_settings settings;
233 void __iomem *base_addr;
234 u32 __pad __attribute__ ((aligned (4)));
235};
236
237/* GLOBAL registers definitions */
238#define GCMDR 0x00
239#define GSTAR 0x04
240#define GMODE 0x08
241#define IQLENR0 0x0C
242#define IQLENR1 0x10
243#define IQRX0 0x14
244#define IQTX0 0x24
245#define IQCFG 0x3c
246#define FIFOCR1 0x44
247#define FIFOCR2 0x48
248#define FIFOCR3 0x4c
249#define FIFOCR4 0x34
250#define CH0CFG 0x50
251#define CH0BRDA 0x54
252#define CH0BTDA 0x58
253#define CH0FRDA 0x98
254#define CH0FTDA 0xb0
255#define CH0LRDA 0xc8
256#define CH0LTDA 0xe0
257
258/* SCC registers definitions */
259#define SCC_START 0x0100
260#define SCC_OFFSET 0x80
261#define CMDR 0x00
262#define STAR 0x04
263#define CCR0 0x08
264#define CCR1 0x0c
265#define CCR2 0x10
266#define BRR 0x2C
267#define RLCR 0x40
268#define IMR 0x54
269#define ISR 0x58
270
271#define GPDIR 0x0400
272#define GPDATA 0x0404
273#define GPIM 0x0408
274
275/* Bit masks */
276#define EncodingMask 0x00700000
277#define CrcMask 0x00000003
278
279#define IntRxScc0 0x10000000
280#define IntTxScc0 0x01000000
281
282#define TxPollCmd 0x00000400
283#define RxActivate 0x08000000
284#define MTFi 0x04000000
285#define Rdr 0x00400000
286#define Rdt 0x00200000
287#define Idr 0x00100000
288#define Idt 0x00080000
289#define TxSccRes 0x01000000
290#define RxSccRes 0x00010000
291#define TxSizeMax 0x1fff /* Datasheet DS1 - 11.1.1.1 */
292#define RxSizeMax 0x1ffc /* Datasheet DS1 - 11.1.2.1 */
293
294#define Ccr0ClockMask 0x0000003f
295#define Ccr1LoopMask 0x00000200
296#define IsrMask 0x000fffff
297#define BrrExpMask 0x00000f00
298#define BrrMultMask 0x0000003f
299#define EncodingMask 0x00700000
300#define Hold 0x40000000
301#define SccBusy 0x10000000
302#define PowerUp 0x80000000
303#define Vis 0x00001000
304#define FrameOk (FrameVfr | FrameCrc)
305#define FrameVfr 0x80
306#define FrameRdo 0x40
307#define FrameCrc 0x20
308#define FrameRab 0x10
309#define FrameAborted 0x00000200
310#define FrameEnd 0x80000000
311#define DataComplete 0x40000000
312#define LengthCheck 0x00008000
313#define SccEvt 0x02000000
314#define NoAck 0x00000200
315#define Action 0x00000001
316#define HiDesc 0x20000000
317
318/* SCC events */
319#define RxEvt 0xf0000000
320#define TxEvt 0x0f000000
321#define Alls 0x00040000
322#define Xdu 0x00010000
323#define Cts 0x00004000
324#define Xmr 0x00002000
325#define Xpr 0x00001000
326#define Rdo 0x00000080
327#define Rfs 0x00000040
328#define Cd 0x00000004
329#define Rfo 0x00000002
330#define Flex 0x00000001
331
332/* DMA core events */
333#define Cfg 0x00200000
334#define Hi 0x00040000
335#define Fi 0x00020000
336#define Err 0x00010000
337#define Arf 0x00000002
338#define ArAck 0x00000001
339
340/* State flags */
341#define Ready 0x00000000
342#define NeedIDR 0x00000001
343#define NeedIDT 0x00000002
344#define RdoSet 0x00000004
345#define FakeReset 0x00000008
346
347/* Don't mask RDO. Ever. */
348#ifdef DSCC4_POLLING
349#define EventsMask 0xfffeef7f
350#else
351#define EventsMask 0xfffa8f7a
352#endif
353
354/* Functions prototypes */
355static void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
356static void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
357static int dscc4_found1(struct pci_dev *, void __iomem *ioaddr);
358static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
359static int dscc4_open(struct net_device *);
360static int dscc4_start_xmit(struct sk_buff *, struct net_device *);
361static int dscc4_close(struct net_device *);
362static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
363static int dscc4_init_ring(struct net_device *);
364static void dscc4_release_ring(struct dscc4_dev_priv *);
365static void dscc4_timer(unsigned long);
366static void dscc4_tx_timeout(struct net_device *);
367static irqreturn_t dscc4_irq(int irq, void *dev_id, struct pt_regs *ptregs);
368static int dscc4_hdlc_attach(struct net_device *, unsigned short, unsigned short);
369static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
370#ifdef DSCC4_POLLING
371static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
372#endif
373
374static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
375{
376 return dev_to_hdlc(dev)->priv;
377}
378
379static inline struct net_device *dscc4_to_dev(struct dscc4_dev_priv *p)
380{
381 return p->dev;
382}
383
384static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
385 struct net_device *dev, int offset)
386{
387 u32 state;
388
389 /* Cf scc_writel for concern regarding thread-safety */
390 state = dpriv->scc_regs[offset >> 2];
391 state &= ~mask;
392 state |= value;
393 dpriv->scc_regs[offset >> 2] = state;
394 writel(state, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
395}
396
397static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
398 struct net_device *dev, int offset)
399{
400 /*
401 * Thread-UNsafe.
 402	 * As of 2002/02/16, there are no threads racing for access.
403 */
404 dpriv->scc_regs[offset >> 2] = bits;
405 writel(bits, dpriv->base_addr + SCC_REG_START(dpriv) + offset);
406}
407
408static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
409{
410 return dpriv->scc_regs[offset >> 2];
411}
412
413static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
414{
415 /* Cf errata DS5 p.4 */
416 readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
417 return readl(dpriv->base_addr + SCC_REG_START(dpriv) + STAR);
418}
419
420static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
421 struct net_device *dev)
422{
423 dpriv->ltda = dpriv->tx_fd_dma +
424 ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
425 writel(dpriv->ltda, dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
426 /* Flush posted writes *NOW* */
427 readl(dpriv->base_addr + CH0LTDA + dpriv->dev_id*4);
428}
429
430static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
431 struct net_device *dev)
432{
433 dpriv->lrda = dpriv->rx_fd_dma +
434 ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
435 writel(dpriv->lrda, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
436}
437
438static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
439{
440 return dpriv->tx_current == dpriv->tx_dirty;
441}
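
dscc4_tx_done() above relies on the driver's free-running index convention: tx_current and tx_dirty only ever increase, their difference is the number of frames still in flight, and they are reduced modulo TX_RING_SIZE only when a descriptor slot is actually picked (cf. dscc4_do_tx()). A small user-space sketch of that convention (illustrative only, not driver code):

#include <stdio.h>

#define RING_SIZE 32	/* same idea as TX_RING_SIZE above */

static unsigned int cur, dirty;	/* free-running producer/consumer indices */

static unsigned int in_flight(void) { return cur - dirty; }
static int ring_full(void)          { return in_flight() >= RING_SIZE; }
static int ring_empty(void)         { return cur == dirty; }

int main(void)
{
	cur = 35;	/* producer has queued 35 descriptors so far...   */
	dirty = 33;	/* ...and the consumer has completed 33 of them   */

	printf("next slot=%u, in flight=%u, full=%d, empty=%d\n",
	       cur % RING_SIZE, in_flight(), ring_full(), ring_empty());
	return 0;
}
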
442
443static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
444 struct net_device *dev)
445{
446 return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
447}
448
449int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev,
450 const char *msg)
451{
452 int ret = 0;
453
454 if (debug > 1) {
455 if (SOURCE_ID(state) != dpriv->dev_id) {
456 printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
457 dev->name, msg, SOURCE_ID(state), state );
458 ret = -1;
459 }
460 if (state & 0x0df80c00) {
461 printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
462 dev->name, msg, state);
463 ret = -1;
464 }
465 }
466 return ret;
467}
468
469void dscc4_tx_print(struct net_device *dev, struct dscc4_dev_priv *dpriv,
470 char *msg)
471{
472 printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
473 dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
474}
475
476static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
477{
478 struct pci_dev *pdev = dpriv->pci_priv->pdev;
479 struct TxFD *tx_fd = dpriv->tx_fd;
480 struct RxFD *rx_fd = dpriv->rx_fd;
481 struct sk_buff **skbuff;
482 int i;
483
484 pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
485 pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
486
487 skbuff = dpriv->tx_skbuff;
488 for (i = 0; i < TX_RING_SIZE; i++) {
489 if (*skbuff) {
490 pci_unmap_single(pdev, tx_fd->data, (*skbuff)->len,
491 PCI_DMA_TODEVICE);
492 dev_kfree_skb(*skbuff);
493 }
494 skbuff++;
495 tx_fd++;
496 }
497
498 skbuff = dpriv->rx_skbuff;
499 for (i = 0; i < RX_RING_SIZE; i++) {
500 if (*skbuff) {
501 pci_unmap_single(pdev, rx_fd->data,
502 RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
503 dev_kfree_skb(*skbuff);
504 }
505 skbuff++;
506 rx_fd++;
507 }
508}
509
510inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev)
511{
512 unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
513 struct RxFD *rx_fd = dpriv->rx_fd + dirty;
514 const int len = RX_MAX(HDLC_MAX_MRU);
515 struct sk_buff *skb;
516 int ret = 0;
517
518 skb = dev_alloc_skb(len);
519 dpriv->rx_skbuff[dirty] = skb;
520 if (skb) {
521 skb->protocol = hdlc_type_trans(skb, dev);
522 rx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
523 len, PCI_DMA_FROMDEVICE);
524 } else {
525 rx_fd->data = (u32) NULL;
526 ret = -1;
527 }
528 return ret;
529}
530
531/*
532 * IRQ/thread/whatever safe
533 */
534static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
535 struct net_device *dev, char *msg)
536{
537 s8 i = 0;
538
539 do {
540 if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
541 printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
542 msg, i);
543 goto done;
544 }
545 set_current_state(TASK_UNINTERRUPTIBLE);
546 schedule_timeout(10);
547 rmb();
548 } while (++i > 0);
549 printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
550done:
551 return (i >= 0) ? i : -EAGAIN;
552}
553
554static int dscc4_do_action(struct net_device *dev, char *msg)
555{
556 void __iomem *ioaddr = dscc4_priv(dev)->base_addr;
557 s16 i = 0;
558
559 writel(Action, ioaddr + GCMDR);
560 ioaddr += GSTAR;
561 do {
562 u32 state = readl(ioaddr);
563
564 if (state & ArAck) {
565 printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
566 writel(ArAck, ioaddr);
567 goto done;
568 } else if (state & Arf) {
569 printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
570 writel(Arf, ioaddr);
571 i = -1;
572 goto done;
573 }
574 rmb();
575 } while (++i > 0);
576 printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
577done:
578 return i;
579}
580
581static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
582{
583 int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
584 s8 i = 0;
585
586 do {
587 if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
588 (dpriv->iqtx[cur] & Xpr))
589 break;
590 smp_rmb();
591 set_current_state(TASK_UNINTERRUPTIBLE);
592 schedule_timeout(10);
593 } while (++i > 0);
594
595 return (i >= 0 ) ? i : -EAGAIN;
596}
597
598#if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweak needed */
599static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
600{
601 unsigned long flags;
602
603 spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
604 /* Cf errata DS5 p.6 */
605 writel(0x00000000, dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
606 scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
607 readl(dpriv->base_addr + CH0LRDA + dpriv->dev_id*4);
608 writel(MTFi|Rdr, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
609 writel(Action, dpriv->base_addr + GCMDR);
610 spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
611}
612
613#endif
614
615#if 0
616static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
617{
618 u16 i = 0;
619
620 /* Cf errata DS5 p.7 */
621 scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
622 scc_writel(0x00050000, dpriv, dev, CCR2);
623 /*
624 * Must be longer than the time required to fill the fifo.
625 */
626 while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
627 udelay(1);
628 wmb();
629 }
630
631 writel(MTFi|Rdt, dpriv->base_addr + dpriv->dev_id*0x0c + CH0CFG);
632 if (dscc4_do_action(dev, "Rdt") < 0)
633 printk(KERN_ERR "%s: Tx reset failed\n", dev->name);
634}
635#endif
636
637/* TODO: (ab)use this function to refill a completely depleted RX ring. */
638static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
639 struct net_device *dev)
640{
641 struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
642 struct net_device_stats *stats = hdlc_stats(dev);
643 struct pci_dev *pdev = dpriv->pci_priv->pdev;
644 struct sk_buff *skb;
645 int pkt_len;
646
647 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
648 if (!skb) {
649 printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__);
650 goto refill;
651 }
652 pkt_len = TO_SIZE(rx_fd->state2);
653 pci_unmap_single(pdev, rx_fd->data, RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
654 if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
655 stats->rx_packets++;
656 stats->rx_bytes += pkt_len;
657 skb_put(skb, pkt_len);
658 if (netif_running(dev))
659 skb->protocol = hdlc_type_trans(skb, dev);
660 skb->dev->last_rx = jiffies;
661 netif_rx(skb);
662 } else {
663 if (skb->data[pkt_len] & FrameRdo)
664 stats->rx_fifo_errors++;
665 else if (!(skb->data[pkt_len] | ~FrameCrc))
666 stats->rx_crc_errors++;
667 else if (!(skb->data[pkt_len] | ~(FrameVfr | FrameRab)))
668 stats->rx_length_errors++;
669 else
670 stats->rx_errors++;
671 dev_kfree_skb_irq(skb);
672 }
673refill:
674 while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
675 if (try_get_rx_skb(dpriv, dev) < 0)
676 break;
677 dpriv->rx_dirty++;
678 }
679 dscc4_rx_update(dpriv, dev);
680 rx_fd->state2 = 0x00000000;
681 rx_fd->end = 0xbabeface;
682}
683
684static void dscc4_free1(struct pci_dev *pdev)
685{
686 struct dscc4_pci_priv *ppriv;
687 struct dscc4_dev_priv *root;
688 int i;
689
690 ppriv = pci_get_drvdata(pdev);
691 root = ppriv->root;
692
693 for (i = 0; i < dev_per_card; i++)
694 unregister_hdlc_device(dscc4_to_dev(root + i));
695
696 pci_set_drvdata(pdev, NULL);
697
698 for (i = 0; i < dev_per_card; i++)
699 free_netdev(root[i].dev);
700 kfree(root);
701 kfree(ppriv);
702}
703
704static int __devinit dscc4_init_one(struct pci_dev *pdev,
705 const struct pci_device_id *ent)
706{
707 struct dscc4_pci_priv *priv;
708 struct dscc4_dev_priv *dpriv;
709 void __iomem *ioaddr;
710 int i, rc;
711
712 printk(KERN_DEBUG "%s", version);
713
714 rc = pci_enable_device(pdev);
715 if (rc < 0)
716 goto out;
717
718 rc = pci_request_region(pdev, 0, "registers");
719 if (rc < 0) {
720 printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
721 DRV_NAME);
722 goto err_disable_0;
723 }
724 rc = pci_request_region(pdev, 1, "LBI interface");
725 if (rc < 0) {
726 printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
727 DRV_NAME);
728 goto err_free_mmio_region_1;
729 }
730
731 ioaddr = ioremap(pci_resource_start(pdev, 0),
732 pci_resource_len(pdev, 0));
733 if (!ioaddr) {
734 printk(KERN_ERR "%s: cannot remap MMIO region %lx @ %lx\n",
735 DRV_NAME, pci_resource_len(pdev, 0),
736 pci_resource_start(pdev, 0));
737 rc = -EIO;
738 goto err_free_mmio_regions_2;
739 }
740 printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d\n",
741 pci_resource_start(pdev, 0),
742 pci_resource_start(pdev, 1), pdev->irq);
743
744 /* Cf errata DS5 p.2 */
745 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
746 pci_set_master(pdev);
747
748 rc = dscc4_found1(pdev, ioaddr);
749 if (rc < 0)
750 goto err_iounmap_3;
751
752 priv = pci_get_drvdata(pdev);
753
754 rc = request_irq(pdev->irq, dscc4_irq, SA_SHIRQ, DRV_NAME, priv->root);
755 if (rc < 0) {
756 printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
757 goto err_release_4;
758 }
759
760 /* power up/little endian/dma core controlled via lrda/ltda */
761 writel(0x00000001, ioaddr + GMODE);
762 /* Shared interrupt queue */
763 {
764 u32 bits;
765
766 bits = (IRQ_RING_SIZE >> 5) - 1;
767 bits |= bits << 4;
768 bits |= bits << 8;
769 bits |= bits << 16;
770 writel(bits, ioaddr + IQLENR0);
771 }
772 /* Global interrupt queue */
773 writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
774 priv->iqcfg = (u32 *) pci_alloc_consistent(pdev,
775 IRQ_RING_SIZE*sizeof(u32), &priv->iqcfg_dma);
776 if (!priv->iqcfg)
777 goto err_free_irq_5;
778 writel(priv->iqcfg_dma, ioaddr + IQCFG);
779
780 rc = -ENOMEM;
781
782 /*
783 * SCC 0-3 private rx/tx irq structures
784 * IQRX/TXi needs to be set soon. Learned it the hard way...
785 */
786 for (i = 0; i < dev_per_card; i++) {
787 dpriv = priv->root + i;
788 dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
789 IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
790 if (!dpriv->iqtx)
791 goto err_free_iqtx_6;
792 writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
793 }
794 for (i = 0; i < dev_per_card; i++) {
795 dpriv = priv->root + i;
796 dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
797 IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
798 if (!dpriv->iqrx)
799 goto err_free_iqrx_7;
800 writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
801 }
802
803 /* Cf application hint. Beware of hard-lock condition on threshold. */
804 writel(0x42104000, ioaddr + FIFOCR1);
805 //writel(0x9ce69800, ioaddr + FIFOCR2);
806 writel(0xdef6d800, ioaddr + FIFOCR2);
807 //writel(0x11111111, ioaddr + FIFOCR4);
808 writel(0x18181818, ioaddr + FIFOCR4);
809 // FIXME: should depend on the chipset revision
810 writel(0x0000000e, ioaddr + FIFOCR3);
811
812 writel(0xff200001, ioaddr + GCMDR);
813
814 rc = 0;
815out:
816 return rc;
817
818err_free_iqrx_7:
819 while (--i >= 0) {
820 dpriv = priv->root + i;
821 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
822 dpriv->iqrx, dpriv->iqrx_dma);
823 }
824 i = dev_per_card;
825err_free_iqtx_6:
826 while (--i >= 0) {
827 dpriv = priv->root + i;
828 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
829 dpriv->iqtx, dpriv->iqtx_dma);
830 }
831 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
832 priv->iqcfg_dma);
833err_free_irq_5:
834 free_irq(pdev->irq, priv->root);
835err_release_4:
836 dscc4_free1(pdev);
837err_iounmap_3:
838 iounmap (ioaddr);
839err_free_mmio_regions_2:
840 pci_release_region(pdev, 1);
841err_free_mmio_region_1:
842 pci_release_region(pdev, 0);
843err_disable_0:
844 pci_disable_device(pdev);
845 goto out;
846};
847
848/*
849 * Let's hope the default values are decent enough to protect my
850 * feet from the user's gun - Ueimor
851 */
852static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
853 struct net_device *dev)
854{
855 /* No interrupts, SCC core disabled. Let's relax */
856 scc_writel(0x00000000, dpriv, dev, CCR0);
857
858 scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);
859
860 /*
861 * No address recognition/crc-CCITT/cts enabled
862 * Shared flags transmission disabled - cf errata DS5 p.11
863 * Carrier detect disabled - cf errata p.14
864 * FIXME: carrier detection/polarity may be handled more gracefully.
865 */
866 scc_writel(0x02408000, dpriv, dev, CCR1);
867
868 /* crc not forwarded - Cf errata DS5 p.11 */
869 scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
870 // crc forwarded
871 //scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
872}
873
874static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
875{
876 int ret = 0;
877
878 if ((hz < 0) || (hz > DSCC4_HZ_MAX))
879 ret = -EOPNOTSUPP;
880 else
881 dpriv->pci_priv->xtal_hz = hz;
882
883 return ret;
884}
885
886static int dscc4_found1(struct pci_dev *pdev, void __iomem *ioaddr)
887{
888 struct dscc4_pci_priv *ppriv;
889 struct dscc4_dev_priv *root;
890 int i, ret = -ENOMEM;
891
892 root = kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
893 if (!root) {
894 printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
895 goto err_out;
896 }
897 memset(root, 0, dev_per_card*sizeof(*root));
898
899 for (i = 0; i < dev_per_card; i++) {
900 root[i].dev = alloc_hdlcdev(root + i);
901 if (!root[i].dev)
902 goto err_free_dev;
903 }
904
905 ppriv = kmalloc(sizeof(*ppriv), GFP_KERNEL);
906 if (!ppriv) {
907 printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
908 goto err_free_dev;
909 }
910 memset(ppriv, 0, sizeof(struct dscc4_pci_priv));
911
912 ppriv->root = root;
913 spin_lock_init(&ppriv->lock);
914
915 for (i = 0; i < dev_per_card; i++) {
916 struct dscc4_dev_priv *dpriv = root + i;
917 struct net_device *d = dscc4_to_dev(dpriv);
918 hdlc_device *hdlc = dev_to_hdlc(d);
919
920 d->base_addr = (unsigned long)ioaddr;
921 d->init = NULL;
922 d->irq = pdev->irq;
923 d->open = dscc4_open;
924 d->stop = dscc4_close;
925 d->set_multicast_list = NULL;
926 d->do_ioctl = dscc4_ioctl;
927 d->tx_timeout = dscc4_tx_timeout;
928 d->watchdog_timeo = TX_TIMEOUT;
929 SET_MODULE_OWNER(d);
930 SET_NETDEV_DEV(d, &pdev->dev);
931
932 dpriv->dev_id = i;
933 dpriv->pci_priv = ppriv;
934 dpriv->base_addr = ioaddr;
935 spin_lock_init(&dpriv->lock);
936
937 hdlc->xmit = dscc4_start_xmit;
938 hdlc->attach = dscc4_hdlc_attach;
939
940 dscc4_init_registers(dpriv, d);
941 dpriv->parity = PARITY_CRC16_PR0_CCITT;
942 dpriv->encoding = ENCODING_NRZ;
943
944 ret = dscc4_init_ring(d);
945 if (ret < 0)
946 goto err_unregister;
947
948 ret = register_hdlc_device(d);
949 if (ret < 0) {
950 printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
951 dscc4_release_ring(dpriv);
952 goto err_unregister;
953 }
954 }
955
956 ret = dscc4_set_quartz(root, quartz);
957 if (ret < 0)
958 goto err_unregister;
959
960 pci_set_drvdata(pdev, ppriv);
961 return ret;
962
963err_unregister:
964 while (i-- > 0) {
965 dscc4_release_ring(root + i);
966 unregister_hdlc_device(dscc4_to_dev(root + i));
967 }
968 kfree(ppriv);
969 i = dev_per_card;
970err_free_dev:
971 while (i-- > 0)
972 free_netdev(root[i].dev);
973 kfree(root);
974err_out:
975 return ret;
976};
977
978/* FIXME: get rid of the unneeded code */
979static void dscc4_timer(unsigned long data)
980{
981 struct net_device *dev = (struct net_device *)data;
982 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
983// struct dscc4_pci_priv *ppriv;
984
985 goto done;
986done:
987 dpriv->timer.expires = jiffies + TX_TIMEOUT;
988 add_timer(&dpriv->timer);
989}
990
991static void dscc4_tx_timeout(struct net_device *dev)
992{
993 /* FIXME: something is missing there */
994}
995
996static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
997{
998 sync_serial_settings *settings = &dpriv->settings;
999
1000 if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
1001 struct net_device *dev = dscc4_to_dev(dpriv);
1002
1003 printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
1004 return -1;
1005 }
1006 return 0;
1007}
1008
1009#ifdef CONFIG_DSCC4_PCI_RST
1010/*
1011 * Some DSCC4-based cards wire the GPIO port and the PCI #RST pin together
1012 * to provide a safe way to reset the ASIC without rebooting the whole
1013 * machine.
1014 *
1015 * This code doesn't need to be efficient. Keep It Simple
1016 */
1017static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
1018{
1019 int i;
1020
1021 down(&dscc4_sem);
1022 for (i = 0; i < 16; i++)
1023 pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);
1024
1025 /* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
1026 writel(0x001c0000, ioaddr + GMODE);
1027 /* Configure GPIO port as output */
1028 writel(0x0000ffff, ioaddr + GPDIR);
1029 /* Disable interruption */
1030 writel(0x0000ffff, ioaddr + GPIM);
1031
1032 writel(0x0000ffff, ioaddr + GPDATA);
1033 writel(0x00000000, ioaddr + GPDATA);
1034
1035 /* Flush posted writes */
1036 readl(ioaddr + GSTAR);
1037
1038 set_current_state(TASK_UNINTERRUPTIBLE);
1039 schedule_timeout(10);
1040
1041 for (i = 0; i < 16; i++)
1042 pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
1043 up(&dscc4_sem);
1044}
1045#else
1046#define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
1047#endif /* CONFIG_DSCC4_PCI_RST */
1048
1049static int dscc4_open(struct net_device *dev)
1050{
1051 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1052 struct dscc4_pci_priv *ppriv;
1053 int ret = -EAGAIN;
1054
1055 if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit)
1056 goto err;
1057
1058 if ((ret = hdlc_open(dev)))
1059 goto err;
1060
1061 ppriv = dpriv->pci_priv;
1062
1063 /*
1064 * specific port (apart from the manufacturer-dependent special PCI #RST
1065 * wiring: it affects all ports). Thus the device goes into the best
1066 * apart: it affects all ports). Thus the device goes in the best
1067 * silent mode possible at dscc4_close() time and simply claims to
1068 * be up if it's opened again. It still isn't possible to change
1069 * the HDLC configuration without rebooting but at least the ports
1070 * can be up/down ifconfig'ed without killing the host.
1071 */
1072 if (dpriv->flags & FakeReset) {
1073 dpriv->flags &= ~FakeReset;
1074 scc_patchl(0, PowerUp, dpriv, dev, CCR0);
1075 scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
1076 scc_writel(EventsMask, dpriv, dev, IMR);
1077 printk(KERN_INFO "%s: up again.\n", dev->name);
1078 goto done;
1079 }
1080
1081 /* IDT+IDR during XPR */
1082 dpriv->flags = NeedIDR | NeedIDT;
1083
1084 scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);
1085
1086 /*
1087 * The following is a bit paranoid...
1088 *
1089 * NB: the datasheet "...CEC will stay active if the SCC is in
1090 * power-down mode or..." and CCR2.RAC = 1 are two different
1091 * situations.
1092 */
1093 if (scc_readl_star(dpriv, dev) & SccBusy) {
1094 printk(KERN_ERR "%s busy. Try later\n", dev->name);
1095 ret = -EAGAIN;
1096 goto err_out;
1097 } else
1098 printk(KERN_INFO "%s: available. Good\n", dev->name);
1099
1100 scc_writel(EventsMask, dpriv, dev, IMR);
1101
1102 /* Posted write is flushed in the wait_ack loop */
1103 scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);
1104
1105 if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
1106 goto err_disable_scc_events;
1107
1108 /*
1109 * I would expect XPR near CE completion (before ? after ?).
1110 * At worst, this code won't see a late XPR and people
1111 * will have to re-issue an ifconfig (this is harmless).
1112 * WARNING, a really missing XPR usually means a hardware
1113 * reset is needed. Suggestions anyone ?
1114 */
1115 if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
1116 printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
1117 goto err_disable_scc_events;
1118 }
1119
1120 if (debug > 2)
1121 dscc4_tx_print(dev, dpriv, "Open");
1122
1123done:
1124 netif_start_queue(dev);
1125
1126 init_timer(&dpriv->timer);
1127 dpriv->timer.expires = jiffies + 10*HZ;
1128 dpriv->timer.data = (unsigned long)dev;
1129 dpriv->timer.function = &dscc4_timer;
1130 add_timer(&dpriv->timer);
1131 netif_carrier_on(dev);
1132
1133 return 0;
1134
1135err_disable_scc_events:
1136 scc_writel(0xffffffff, dpriv, dev, IMR);
1137 scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
1138err_out:
1139 hdlc_close(dev);
1140err:
1141 return ret;
1142}
1143
1144#ifdef DSCC4_POLLING
1145static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
1146{
1147 /* FIXME: it's gonna be easy (TM), for sure */
1148}
1149#endif /* DSCC4_POLLING */
1150
1151static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
1152{
1153 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1154 struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
1155 struct TxFD *tx_fd;
1156 int next;
1157
1158 next = dpriv->tx_current%TX_RING_SIZE;
1159 dpriv->tx_skbuff[next] = skb;
1160 tx_fd = dpriv->tx_fd + next;
1161 tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
1162 tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
1163 PCI_DMA_TODEVICE);
1164 tx_fd->complete = 0x00000000;
1165 tx_fd->jiffies = jiffies;
1166 mb();
1167
1168#ifdef DSCC4_POLLING
1169 spin_lock(&dpriv->lock);
1170 while (dscc4_tx_poll(dpriv, dev));
1171 spin_unlock(&dpriv->lock);
1172#endif
1173
1174 dev->trans_start = jiffies;
1175
1176 if (debug > 2)
1177 dscc4_tx_print(dev, dpriv, "Xmit");
1178 /* To be cleaned(unsigned int)/optimized. Later, ok ? */
1179 if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
1180 netif_stop_queue(dev);
1181
1182 if (dscc4_tx_quiescent(dpriv, dev))
1183 dscc4_do_tx(dpriv, dev);
1184
1185 return 0;
1186}
1187
1188static int dscc4_close(struct net_device *dev)
1189{
1190 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1191
1192 del_timer_sync(&dpriv->timer);
1193 netif_stop_queue(dev);
1194
1195 scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
1196 scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
1197 scc_writel(0xffffffff, dpriv, dev, IMR);
1198
1199 dpriv->flags |= FakeReset;
1200
1201 hdlc_close(dev);
1202
1203 return 0;
1204}
1205
1206static inline int dscc4_check_clock_ability(int port)
1207{
1208 int ret = 0;
1209
1210#ifdef CONFIG_DSCC4_PCISYNC
1211 if (port >= 2)
1212 ret = -1;
1213#endif
1214 return ret;
1215}
1216
1217/*
1218 * DS1 p.137: "There are a total of 13 different clocking modes..."
1219 * ^^
1220 * Design choices:
1221 * - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a).
1222 * Clock mode 3b _should_ work but the testing seems to make this point
1223 * dubious (DIY testing requires setting CCR0 at 0x00000033).
1224 * This is supposed to provide least surprise "DTE like" behavior.
1225 * - if line rate is specified, clocks are assumed to be locally generated.
1226 * A quartz must be available (on pin XTAL1). Modes 6b/7b are used. Choosing
1227 * between these is done automagically according to the required frequency
1228 * scaling. Of course some rounding may take place.
1229 * - no high speed mode (40Mb/s). May be trivial to do but I don't have an
1230 * appropriate external clocking device for testing.
1231 * - no time-slot/clock mode 5: shameless laziness.
1232 *
1233 * The clock signal wiring can be (is ?) manufacturer dependent. Good luck.
1234 *
1235 * BIG FAT WARNING: if the device isn't provided enough clocking signal, it
1236 * won't pass the init sequence. For example, straight back-to-back DTE without
1237 * external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is
1238 * called.
1239 *
1240 * Typos lurk in the datasheet (missing divider in clock mode 7a, figure 51
1241 * p.153 of DS0, for example)
1242 *
1243 * Clock mode related bits of CCR0:
1244 * +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only)
1245 * | +---------- SSEL: sub-mode select 0 -> a, 1 -> b
1246 * | | +-------- High Speed: say 0
1247 * | | | +-+-+-- Clock Mode: 0..7
1248 * | | | | | |
1249 * -+-+-+-+-+-+-+-+
1250 * x|x|5|4|3|2|1|0| lower bits
1251 *
1252 * Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b)
1253 * +-+-+-+------------------ M (0..15)
1254 * | | | | +-+-+-+-+-+-- N (0..63)
1255 * 0 0 0 0 | | | | 0 0 | | | | | |
1256 * ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1257 * f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits
1258 *
1259 */
1260static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
1261{
1262 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1263 int ret = -1;
1264 u32 brr;
1265
1266 *state &= ~Ccr0ClockMask;
1267 if (*bps) { /* Clock generated - required for DCE */
1268 u32 n = 0, m = 0, divider;
1269 int xtal;
1270
1271 xtal = dpriv->pci_priv->xtal_hz;
1272 if (!xtal)
1273 goto done;
1274 if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
1275 goto done;
1276 divider = xtal / *bps;
1277 if (divider > BRR_DIVIDER_MAX) {
1278 divider >>= 4;
1279 *state |= 0x00000036; /* Clock mode 6b (BRG/16) */
1280 } else
1281 *state |= 0x00000037; /* Clock mode 7b (BRG) */
1282 if (divider >> 22) {
1283 n = 63;
1284 m = 15;
1285 } else if (divider) {
1286 /* Extraction of the 6 highest weighted bits */
1287 m = 0;
1288 while (0xffffffc0 & divider) {
1289 m++;
1290 divider >>= 1;
1291 }
1292 n = divider;
1293 }
1294 brr = (m << 8) | n;
1295 divider = n << m;
1296 if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */
1297 divider <<= 4;
1298 *bps = xtal / divider;
1299 } else {
1300 /*
1301 * External clock - DTE
1302 * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00).
1303 * Nothing more to be done
1304 */
1305 brr = 0;
1306 }
1307 scc_writel(brr, dpriv, dev, BRR);
1308 ret = 0;
1309done:
1310 return ret;
1311}
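/*
 * Worked example of the computation above (numbers are illustrative only):
 * assume xtal = 14745600 Hz and a requested rate of 128000 bps, and assume
 * the divider does not exceed BRR_DIVIDER_MAX so clock mode 7b is selected.
 *   divider = 14745600 / 128000 = 115
 *   6-bit extraction: one right shift suffices -> m = 1, n = 57
 *   brr = (1 << 8) | 57 = 0x0139
 *   effective divider = 57 << 1 = 114
 *   *bps = 14745600 / 114 = 129347
 * The rounded rate is handed back to the caller, which is why
 * dscc4_clock_setting() below may report "clock adjusted".
 */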
1312
1313static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1314{
1315 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
1316 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1317 const size_t size = sizeof(dpriv->settings);
1318 int ret = 0;
1319
1320 if (dev->flags & IFF_UP)
1321 return -EBUSY;
1322
1323 if (cmd != SIOCWANDEV)
1324 return -EOPNOTSUPP;
1325
1326 switch(ifr->ifr_settings.type) {
1327 case IF_GET_IFACE:
1328 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
1329 if (ifr->ifr_settings.size < size) {
1330 ifr->ifr_settings.size = size; /* data size wanted */
1331 return -ENOBUFS;
1332 }
1333 if (copy_to_user(line, &dpriv->settings, size))
1334 return -EFAULT;
1335 break;
1336
1337 case IF_IFACE_SYNC_SERIAL:
1338 if (!capable(CAP_NET_ADMIN))
1339 return -EPERM;
1340
1341 if (dpriv->flags & FakeReset) {
1342 printk(KERN_INFO "%s: please reset the device"
1343 " before this command\n", dev->name);
1344 return -EPERM;
1345 }
1346 if (copy_from_user(&dpriv->settings, line, size))
1347 return -EFAULT;
1348 ret = dscc4_set_iface(dpriv, dev);
1349 break;
1350
1351 default:
1352 ret = hdlc_ioctl(dev, ifr, cmd);
1353 break;
1354 }
1355
1356 return ret;
1357}
1358
1359static int dscc4_match(struct thingie *p, int value)
1360{
1361 int i;
1362
1363 for (i = 0; p[i].define != -1; i++) {
1364 if (value == p[i].define)
1365 break;
1366 }
1367 if (p[i].define == -1)
1368 return -1;
1369 else
1370 return i;
1371}
1372
1373static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
1374 struct net_device *dev)
1375{
1376 sync_serial_settings *settings = &dpriv->settings;
1377 int ret = -EOPNOTSUPP;
1378 u32 bps, state;
1379
1380 bps = settings->clock_rate;
1381 state = scc_readl(dpriv, CCR0);
1382 if (dscc4_set_clock(dev, &bps, &state) < 0)
1383 goto done;
1384 if (bps) { /* DCE */
1385 printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
1386 if (settings->clock_rate != bps) {
1387 printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
1388 dev->name, settings->clock_rate, bps);
1389 settings->clock_rate = bps;
1390 }
1391 } else { /* DTE */
1392 state |= PowerUp | Vis;
1393 printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
1394 }
1395 scc_writel(state, dpriv, dev, CCR0);
1396 ret = 0;
1397done:
1398 return ret;
1399}
1400
1401static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
1402 struct net_device *dev)
1403{
1404 struct thingie encoding[] = {
1405 { ENCODING_NRZ, 0x00000000 },
1406 { ENCODING_NRZI, 0x00200000 },
1407 { ENCODING_FM_MARK, 0x00400000 },
1408 { ENCODING_FM_SPACE, 0x00500000 },
1409 { ENCODING_MANCHESTER, 0x00600000 },
1410 { -1, 0}
1411 };
1412 int i, ret = 0;
1413
1414 i = dscc4_match(encoding, dpriv->encoding);
1415 if (i >= 0)
1416 scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
1417 else
1418 ret = -EOPNOTSUPP;
1419 return ret;
1420}
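/*
 * Example of the dscc4_match() lookup above: with dpriv->encoding set to
 * ENCODING_FM_MARK the loop stops at index 2 and 0x00400000 is patched into
 * CCR0 under EncodingMask; an unknown value runs into the -1 terminator and
 * the function returns -EOPNOTSUPP.
 */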
1421
1422static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
1423 struct net_device *dev)
1424{
1425 sync_serial_settings *settings = &dpriv->settings;
1426 u32 state;
1427
1428 state = scc_readl(dpriv, CCR1);
1429 if (settings->loopback) {
1430 printk(KERN_DEBUG "%s: loopback\n", dev->name);
1431 state |= 0x00000100;
1432 } else {
1433 printk(KERN_DEBUG "%s: normal\n", dev->name);
1434 state &= ~0x00000100;
1435 }
1436 scc_writel(state, dpriv, dev, CCR1);
1437 return 0;
1438}
1439
1440static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
1441 struct net_device *dev)
1442{
1443 struct thingie crc[] = {
1444 { PARITY_CRC16_PR0_CCITT, 0x00000010 },
1445 { PARITY_CRC16_PR1_CCITT, 0x00000000 },
1446 { PARITY_CRC32_PR0_CCITT, 0x00000011 },
1447 { PARITY_CRC32_PR1_CCITT, 0x00000001 },
 { -1, 0} /* terminator: dscc4_match() stops on -1 */
1448 };
1449 int i, ret = 0;
1450
1451 i = dscc4_match(crc, dpriv->parity);
1452 if (i >= 0)
1453 scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
1454 else
1455 ret = -EOPNOTSUPP;
1456 return ret;
1457}
1458
1459static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
1460{
1461 struct {
1462 int (*action)(struct dscc4_dev_priv *, struct net_device *);
1463 } *p, do_setting[] = {
1464 { dscc4_encoding_setting },
1465 { dscc4_clock_setting },
1466 { dscc4_loopback_setting },
1467 { dscc4_crc_setting },
1468 { NULL }
1469 };
1470 int ret = 0;
1471
1472 for (p = do_setting; p->action; p++) {
1473 if ((ret = p->action(dpriv, dev)) < 0)
1474 break;
1475 }
1476 return ret;
1477}
1478
1479static irqreturn_t dscc4_irq(int irq, void *token, struct pt_regs *ptregs)
1480{
1481 struct dscc4_dev_priv *root = token;
1482 struct dscc4_pci_priv *priv;
1483 struct net_device *dev;
1484 void __iomem *ioaddr;
1485 u32 state;
1486 unsigned long flags;
1487 int i, handled = 1;
1488
1489 priv = root->pci_priv;
1490 dev = dscc4_to_dev(root);
1491
1492 spin_lock_irqsave(&priv->lock, flags);
1493
1494 ioaddr = root->base_addr;
1495
1496 state = readl(ioaddr + GSTAR);
1497 if (!state) {
1498 handled = 0;
1499 goto out;
1500 }
1501 if (debug > 3)
1502 printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
1503 writel(state, ioaddr + GSTAR);
1504
1505 if (state & Arf) {
1506 printk(KERN_ERR "%s: failure (Arf). Harass the maintainer\n",
1507 dev->name);
1508 goto out;
1509 }
1510 state &= ~ArAck;
1511 if (state & Cfg) {
1512 if (debug > 0)
1513 printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
1514 if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & Arf)
1515 printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
1516 if (!(state &= ~Cfg))
1517 goto out;
1518 }
1519 if (state & RxEvt) {
1520 i = dev_per_card - 1;
1521 do {
1522 dscc4_rx_irq(priv, root + i);
1523 } while (--i >= 0);
1524 state &= ~RxEvt;
1525 }
1526 if (state & TxEvt) {
1527 i = dev_per_card - 1;
1528 do {
1529 dscc4_tx_irq(priv, root + i);
1530 } while (--i >= 0);
1531 state &= ~TxEvt;
1532 }
1533out:
1534 spin_unlock_irqrestore(&priv->lock, flags);
1535 return IRQ_RETVAL(handled);
1536}
1537
1538static void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
1539 struct dscc4_dev_priv *dpriv)
1540{
1541 struct net_device *dev = dscc4_to_dev(dpriv);
1542 u32 state;
1543 int cur, loop = 0;
1544
1545try:
1546 cur = dpriv->iqtx_current%IRQ_RING_SIZE;
1547 state = dpriv->iqtx[cur];
1548 if (!state) {
1549 if (debug > 4)
1550 printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
1551 state);
1552 if ((debug > 1) && (loop > 1))
1553 printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
1554 if (loop && netif_queue_stopped(dev))
1555 if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
1556 netif_wake_queue(dev);
1557
1558 if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
1559 !dscc4_tx_done(dpriv))
1560 dscc4_do_tx(dpriv, dev);
1561 return;
1562 }
1563 loop++;
1564 dpriv->iqtx[cur] = 0;
1565 dpriv->iqtx_current++;
1566
1567 if (state_check(state, dpriv, dev, "Tx") < 0)
1568 return;
1569
1570 if (state & SccEvt) {
1571 if (state & Alls) {
1572 struct net_device_stats *stats = hdlc_stats(dev);
1573 struct sk_buff *skb;
1574 struct TxFD *tx_fd;
1575
1576 if (debug > 2)
1577 dscc4_tx_print(dev, dpriv, "Alls");
1578 /*
1579 * DataComplete can't be trusted for Tx completion.
1580 * Cf errata DS5 p.8
1581 */
1582 cur = dpriv->tx_dirty%TX_RING_SIZE;
1583 tx_fd = dpriv->tx_fd + cur;
1584 skb = dpriv->tx_skbuff[cur];
1585 if (skb) {
1586 pci_unmap_single(ppriv->pdev, tx_fd->data,
1587 skb->len, PCI_DMA_TODEVICE);
1588 if (tx_fd->state & FrameEnd) {
1589 stats->tx_packets++;
1590 stats->tx_bytes += skb->len;
1591 }
1592 dev_kfree_skb_irq(skb);
1593 dpriv->tx_skbuff[cur] = NULL;
1594 ++dpriv->tx_dirty;
1595 } else {
1596 if (debug > 1)
1597 printk(KERN_ERR "%s Tx: NULL skb %d\n",
1598 dev->name, cur);
1599 }
1600 /*
1601 * If the driver ends up sending crap on the wire, it
1602 * will be way easier to diagnose than the (not so)
1603 * random freezes induced by zero-sized tx frames.
1604 */
1605 tx_fd->data = tx_fd->next;
1606 tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
1607 tx_fd->complete = 0x00000000;
1608 tx_fd->jiffies = 0;
1609
1610 if (!(state &= ~Alls))
1611 goto try;
1612 }
1613 /*
1614 * Transmit Data Underrun
1615 */
1616 if (state & Xdu) {
1617 printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
1618 dpriv->flags = NeedIDT;
1619 /* Tx reset */
1620 writel(MTFi | Rdt,
1621 dpriv->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
1622 writel(Action, dpriv->base_addr + GCMDR);
1623 return;
1624 }
1625 if (state & Cts) {
1626 printk(KERN_INFO "%s: CTS transition\n", dev->name);
1627 if (!(state &= ~Cts)) /* DEBUG */
1628 goto try;
1629 }
1630 if (state & Xmr) {
1631 /* Frame needs to be sent again - FIXME */
1632 printk(KERN_ERR "%s: Xmr. Ask maintainer\n", DRV_NAME);
1633 if (!(state &= ~Xmr)) /* DEBUG */
1634 goto try;
1635 }
1636 if (state & Xpr) {
1637 void __iomem *scc_addr;
1638 unsigned long ring;
1639 int i;
1640
1641 /*
1642 * - the busy condition happens (sometimes);
1643 * - it doesn't seem to make the handler unreliable.
1644 */
1645 for (i = 1; i; i <<= 1) {
1646 if (!(scc_readl_star(dpriv, dev) & SccBusy))
1647 break;
1648 }
1649 if (!i)
1650 printk(KERN_INFO "%s busy in irq\n", dev->name);
1651
1652 scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
1653 /* Keep this order: IDT before IDR */
1654 if (dpriv->flags & NeedIDT) {
1655 if (debug > 2)
1656 dscc4_tx_print(dev, dpriv, "Xpr");
1657 ring = dpriv->tx_fd_dma +
1658 (dpriv->tx_dirty%TX_RING_SIZE)*
1659 sizeof(struct TxFD);
1660 writel(ring, scc_addr + CH0BTDA);
1661 dscc4_do_tx(dpriv, dev);
1662 writel(MTFi | Idt, scc_addr + CH0CFG);
1663 if (dscc4_do_action(dev, "IDT") < 0)
1664 goto err_xpr;
1665 dpriv->flags &= ~NeedIDT;
1666 }
1667 if (dpriv->flags & NeedIDR) {
1668 ring = dpriv->rx_fd_dma +
1669 (dpriv->rx_current%RX_RING_SIZE)*
1670 sizeof(struct RxFD);
1671 writel(ring, scc_addr + CH0BRDA);
1672 dscc4_rx_update(dpriv, dev);
1673 writel(MTFi | Idr, scc_addr + CH0CFG);
1674 if (dscc4_do_action(dev, "IDR") < 0)
1675 goto err_xpr;
1676 dpriv->flags &= ~NeedIDR;
1677 smp_wmb();
1678 /* Activate receiver and misc */
1679 scc_writel(0x08050008, dpriv, dev, CCR2);
1680 }
1681 err_xpr:
1682 if (!(state &= ~Xpr))
1683 goto try;
1684 }
1685 if (state & Cd) {
1686 if (debug > 0)
1687 printk(KERN_INFO "%s: CD transition\n", dev->name);
1688 if (!(state &= ~Cd)) /* DEBUG */
1689 goto try;
1690 }
1691 } else { /* ! SccEvt */
1692 if (state & Hi) {
1693#ifdef DSCC4_POLLING
1694 while (!dscc4_tx_poll(dpriv, dev));
1695#endif
1696 printk(KERN_INFO "%s: Tx Hi\n", dev->name);
1697 state &= ~Hi;
1698 }
1699 if (state & Err) {
1700 printk(KERN_INFO "%s: Tx ERR\n", dev->name);
1701 hdlc_stats(dev)->tx_errors++;
1702 state &= ~Err;
1703 }
1704 }
1705 goto try;
1706}
1707
1708static void dscc4_rx_irq(struct dscc4_pci_priv *priv,
1709 struct dscc4_dev_priv *dpriv)
1710{
1711 struct net_device *dev = dscc4_to_dev(dpriv);
1712 u32 state;
1713 int cur;
1714
1715try:
1716 cur = dpriv->iqrx_current%IRQ_RING_SIZE;
1717 state = dpriv->iqrx[cur];
1718 if (!state)
1719 return;
1720 dpriv->iqrx[cur] = 0;
1721 dpriv->iqrx_current++;
1722
1723 if (state_check(state, dpriv, dev, "Rx") < 0)
1724 return;
1725
1726 if (!(state & SccEvt)){
1727 struct RxFD *rx_fd;
1728
1729 if (debug > 4)
1730 printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
1731 state);
1732 state &= 0x00ffffff;
1733 if (state & Err) { /* Hold or reset */
1734 printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
1735 cur = dpriv->rx_current%RX_RING_SIZE;
1736 rx_fd = dpriv->rx_fd + cur;
1737 /*
1738 * Presume we're not facing a DMAC receiver reset.
1739 * Since we use the rx size-filtering feature of the
1740 * DSCC4, the beginning of a new frame is waiting in
1741 * the rx fifo. I bet a Receive Data Overflow will
1742 * happen most of the time but let's try and avoid it.
1743 * Btw (as for RDO) if one experiences ERR while
1744 * the system looks rather idle, there may be a
1745 * problem with latency. In this case, increasing
1746 * RX_RING_SIZE may help.
1747 */
1748 //while (dpriv->rx_needs_refill) {
1749 while (!(rx_fd->state1 & Hold)) {
1750 rx_fd++;
1751 cur++;
1752 if (!(cur = cur%RX_RING_SIZE))
1753 rx_fd = dpriv->rx_fd;
1754 }
1755 //dpriv->rx_needs_refill--;
1756 try_get_rx_skb(dpriv, dev);
1757 if (!rx_fd->data)
1758 goto try;
1759 rx_fd->state1 &= ~Hold;
1760 rx_fd->state2 = 0x00000000;
1761 rx_fd->end = 0xbabeface;
1762 //}
1763 goto try;
1764 }
1765 if (state & Fi) {
1766 dscc4_rx_skb(dpriv, dev);
1767 goto try;
1768 }
1769 if (state & Hi ) { /* HI bit */
1770 printk(KERN_INFO "%s: Rx Hi\n", dev->name);
1771 state &= ~Hi;
1772 goto try;
1773 }
1774 } else { /* SccEvt */
1775 if (debug > 1) {
1776 //FIXME: check that all the events are listed here
1777 static struct {
1778 u32 mask;
1779 const char *irq_name;
1780 } evts[] = {
1781 { 0x00008000, "TIN"},
1782 { 0x00000020, "RSC"},
1783 { 0x00000010, "PCE"},
1784 { 0x00000008, "PLLA"},
1785 { 0, NULL}
1786 }, *evt;
1787
1788 for (evt = evts; evt->irq_name; evt++) {
1789 if (state & evt->mask) {
1790 printk(KERN_DEBUG "%s: %s\n",
1791 dev->name, evt->irq_name);
1792 if (!(state &= ~evt->mask))
1793 goto try;
1794 }
1795 }
1796 } else {
1797 if (!(state &= ~0x0000c03c))
1798 goto try;
1799 }
1800 if (state & Cts) {
1801 printk(KERN_INFO "%s: CTS transition\n", dev->name);
1802 if (!(state &= ~Cts)) /* DEBUG */
1803 goto try;
1804 }
1805 /*
1806 * Receive Data Overflow (FIXME: fscked)
1807 */
1808 if (state & Rdo) {
1809 struct RxFD *rx_fd;
1810 void __iomem *scc_addr;
1811 int cur;
1812
1813 //if (debug)
1814 // dscc4_rx_dump(dpriv);
1815 scc_addr = dpriv->base_addr + 0x0c*dpriv->dev_id;
1816
1817 scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
1818 /*
1819 * This has no effect. Why ?
1820 * ORed with TxSccRes, one sees the CFG ack (for
1821 * the TX part only).
1822 */
1823 scc_writel(RxSccRes, dpriv, dev, CMDR);
1824 dpriv->flags |= RdoSet;
1825
1826 /*
1827 * Let's try and save something in the received data.
1828 * rx_current must be incremented at least once to
1829 * avoid HOLD in the descriptor BRDA will point to.
1830 */
1831 do {
1832 cur = dpriv->rx_current++%RX_RING_SIZE;
1833 rx_fd = dpriv->rx_fd + cur;
1834 if (!(rx_fd->state2 & DataComplete))
1835 break;
1836 if (rx_fd->state2 & FrameAborted) {
1837 hdlc_stats(dev)->rx_over_errors++;
1838 rx_fd->state1 |= Hold;
1839 rx_fd->state2 = 0x00000000;
1840 rx_fd->end = 0xbabeface;
1841 } else
1842 dscc4_rx_skb(dpriv, dev);
1843 } while (1);
1844
1845 if (debug > 0) {
1846 if (dpriv->flags & RdoSet)
1847 printk(KERN_DEBUG
1848 "%s: no RDO in Rx data\n", DRV_NAME);
1849 }
1850#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
1851 /*
1852 * FIXME: must the reset be this violent ?
1853 */
1854#warning "FIXME: CH0BRDA"
1855 writel(dpriv->rx_fd_dma +
1856 (dpriv->rx_current%RX_RING_SIZE)*
1857 sizeof(struct RxFD), scc_addr + CH0BRDA);
1858 writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
1859 if (dscc4_do_action(dev, "RDR") < 0) {
1860 printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
1861 dev->name, "RDR");
1862 goto rdo_end;
1863 }
1864 writel(MTFi|Idr, scc_addr + CH0CFG);
1865 if (dscc4_do_action(dev, "IDR") < 0) {
1866 printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
1867 dev->name, "IDR");
1868 goto rdo_end;
1869 }
1870 rdo_end:
1871#endif
1872 scc_patchl(0, RxActivate, dpriv, dev, CCR2);
1873 goto try;
1874 }
1875 if (state & Cd) {
1876 printk(KERN_INFO "%s: CD transition\n", dev->name);
1877 if (!(state &= ~Cd)) /* DEBUG */
1878 goto try;
1879 }
1880 if (state & Flex) {
1881 printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
1882 if (!(state &= ~Flex))
1883 goto try;
1884 }
1885 }
1886}
1887
1888/*
1889 * I had expected the following to work for the first descriptor
1890 * (tx_fd->state = 0xc0000000)
1891 * - Hold=1 (don't try and branch to the next descriptor);
1892 * - No=0 (I want an empty data section, i.e. size=0);
1893 * - Fe=1 (required by No=0 or we got an Err irq and must reset).
1894 * It failed and locked solid. Thus the introduction of a dummy skb.
1895 * Problem is acknowledged in errata sheet DS5. Joy :o/
1896 */
1897struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
1898{
1899 struct sk_buff *skb;
1900
1901 skb = dev_alloc_skb(DUMMY_SKB_SIZE);
1902 if (skb) {
1903 int last = dpriv->tx_dirty%TX_RING_SIZE;
1904 struct TxFD *tx_fd = dpriv->tx_fd + last;
1905
1906 skb->len = DUMMY_SKB_SIZE;
1907 memcpy(skb->data, version, strlen(version)%DUMMY_SKB_SIZE);
1908 tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
1909 tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
1910 DUMMY_SKB_SIZE, PCI_DMA_TODEVICE);
1911 dpriv->tx_skbuff[last] = skb;
1912 }
1913 return skb;
1914}
1915
1916static int dscc4_init_ring(struct net_device *dev)
1917{
1918 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
1919 struct pci_dev *pdev = dpriv->pci_priv->pdev;
1920 struct TxFD *tx_fd;
1921 struct RxFD *rx_fd;
1922 void *ring;
1923 int i;
1924
1925 ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
1926 if (!ring)
1927 goto err_out;
1928 dpriv->rx_fd = rx_fd = (struct RxFD *) ring;
1929
1930 ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
1931 if (!ring)
1932 goto err_free_dma_rx;
1933 dpriv->tx_fd = tx_fd = (struct TxFD *) ring;
1934
1935 memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
1936 dpriv->tx_dirty = 0xffffffff;
1937 i = dpriv->tx_current = 0;
1938 do {
1939 tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
1940 tx_fd->complete = 0x00000000;
1941 /* FIXME: NULL should be ok - to be tried */
1942 tx_fd->data = dpriv->tx_fd_dma;
1943 (tx_fd++)->next = (u32)(dpriv->tx_fd_dma +
1944 (++i%TX_RING_SIZE)*sizeof(*tx_fd));
1945 } while (i < TX_RING_SIZE);
1946
1947 if (dscc4_init_dummy_skb(dpriv) < 0)
1948 goto err_free_dma_tx;
1949
1950 memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
1951 i = dpriv->rx_dirty = dpriv->rx_current = 0;
1952 do {
1953 /* size set by the host. Multiple of 4 bytes please */
1954 rx_fd->state1 = HiDesc;
1955 rx_fd->state2 = 0x00000000;
1956 rx_fd->end = 0xbabeface;
1957 rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
1958 // FIXME: return value checked but the handling looks suspect
1959 if (try_get_rx_skb(dpriv, dev) >= 0)
1960 dpriv->rx_dirty++;
1961 (rx_fd++)->next = (u32)(dpriv->rx_fd_dma +
1962 (++i%RX_RING_SIZE)*sizeof(*rx_fd));
1963 } while (i < RX_RING_SIZE);
1964
1965 return 0;
1966
1967err_free_dma_tx:
1968 pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
1969err_free_dma_rx:
1970 pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
1971err_out:
1972 return -ENOMEM;
1973}
1974
1975static void __devexit dscc4_remove_one(struct pci_dev *pdev)
1976{
1977 struct dscc4_pci_priv *ppriv;
1978 struct dscc4_dev_priv *root;
1979 void __iomem *ioaddr;
1980 int i;
1981
1982 ppriv = pci_get_drvdata(pdev);
1983 root = ppriv->root;
1984
1985 ioaddr = root->base_addr;
1986
1987 dscc4_pci_reset(pdev, ioaddr);
1988
1989 free_irq(pdev->irq, root);
1990 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
1991 ppriv->iqcfg_dma);
1992 for (i = 0; i < dev_per_card; i++) {
1993 struct dscc4_dev_priv *dpriv = root + i;
1994
1995 dscc4_release_ring(dpriv);
1996 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
1997 dpriv->iqrx, dpriv->iqrx_dma);
1998 pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
1999 dpriv->iqtx, dpriv->iqtx_dma);
2000 }
2001
2002 dscc4_free1(pdev);
2003
2004 iounmap(ioaddr);
2005
2006 pci_release_region(pdev, 1);
2007 pci_release_region(pdev, 0);
2008
2009 pci_disable_device(pdev);
2010}
2011
2012static int dscc4_hdlc_attach(struct net_device *dev, unsigned short encoding,
2013 unsigned short parity)
2014{
2015 struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
2016
2017 if (encoding != ENCODING_NRZ &&
2018 encoding != ENCODING_NRZI &&
2019 encoding != ENCODING_FM_MARK &&
2020 encoding != ENCODING_FM_SPACE &&
2021 encoding != ENCODING_MANCHESTER)
2022 return -EINVAL;
2023
2024 if (parity != PARITY_NONE &&
2025 parity != PARITY_CRC16_PR0_CCITT &&
2026 parity != PARITY_CRC16_PR1_CCITT &&
2027 parity != PARITY_CRC32_PR0_CCITT &&
2028 parity != PARITY_CRC32_PR1_CCITT)
2029 return -EINVAL;
2030
2031 dpriv->encoding = encoding;
2032 dpriv->parity = parity;
2033 return 0;
2034}
2035
2036#ifndef MODULE
2037static int __init dscc4_setup(char *str)
2038{
2039 int *args[] = { &debug, &quartz, NULL }, **p = args;
2040
2041 while (*p && (get_option(&str, *p) == 2))
2042 p++;
2043 return 1;
2044}
2045
2046__setup("dscc4.setup=", dscc4_setup);
2047#endif
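/*
 * Illustration only: booting with "dscc4.setup=1,14745600" on the kernel
 * command line stores 1 in debug and 14745600 in quartz, since get_option()
 * fills the integers in order and returns 2 while a comma-separated value
 * still follows. A single value sets debug alone.
 */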
2048
2049static struct pci_device_id dscc4_pci_tbl[] = {
2050 { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
2051 PCI_ANY_ID, PCI_ANY_ID, },
2052 { 0,}
2053};
2054MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);
2055
2056static struct pci_driver dscc4_driver = {
2057 .name = DRV_NAME,
2058 .id_table = dscc4_pci_tbl,
2059 .probe = dscc4_init_one,
2060 .remove = __devexit_p(dscc4_remove_one),
2061};
2062
2063static int __init dscc4_init_module(void)
2064{
2065 return pci_module_init(&dscc4_driver);
2066}
2067
2068static void __exit dscc4_cleanup_module(void)
2069{
2070 pci_unregister_driver(&dscc4_driver);
2071}
2072
2073module_init(dscc4_init_module);
2074module_exit(dscc4_cleanup_module);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
new file mode 100644
index 000000000000..7575b799ce53
--- /dev/null
+++ b/drivers/net/wan/farsync.c
@@ -0,0 +1,2712 @@
1/*
2 * FarSync WAN driver for Linux (2.6.x kernel version)
3 *
4 * Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
5 *
6 * Copyright (C) 2001-2004 FarSite Communications Ltd.
7 * www.farsite.co.uk
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 * Author: R.J.Dunlop <bob.dunlop@farsite.co.uk>
15 * Maintainer: Kevin Curtis <kevin.curtis@farsite.co.uk>
16 */
17
18#include <linux/module.h>
19#include <linux/kernel.h>
20#include <linux/version.h>
21#include <linux/pci.h>
22#include <linux/ioport.h>
23#include <linux/init.h>
24#include <linux/if.h>
25#include <linux/hdlc.h>
26#include <asm/io.h>
27#include <asm/uaccess.h>
28
29#include "farsync.h"
30
31/*
32 * Module info
33 */
34MODULE_AUTHOR("R.J.Dunlop <bob.dunlop@farsite.co.uk>");
35MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd.");
36MODULE_LICENSE("GPL");
37
38/* Driver configuration and global parameters
39 * ==========================================
40 */
41
42/* Number of ports (per card) and cards supported
43 */
44#define FST_MAX_PORTS 4
45#define FST_MAX_CARDS 32
46
47/* Default parameters for the link
48 */
49#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
50 * useful; the syncppp module forces
51 * this down assuming a slower line I
52 * guess.
53 */
54#define FST_TXQ_DEPTH 16 /* This one is for the buffering
55 * of frames on the way down to the card
56 * so that we can keep the card busy
57 * and maximise throughput
58 */
59#define FST_HIGH_WATER_MARK 12 /* Point at which we flow control
60 * network layer */
61#define FST_LOW_WATER_MARK 8 /* Point at which we remove flow
62 * control from network layer */
63#define FST_MAX_MTU 8000 /* Huge but possible */
64#define FST_DEF_MTU 1500 /* Common sane value */
65
66#define FST_TX_TIMEOUT (2*HZ)
67
68#ifdef ARPHRD_RAWHDLC
69#define ARPHRD_MYTYPE ARPHRD_RAWHDLC /* Raw frames */
70#else
71#define ARPHRD_MYTYPE ARPHRD_HDLC /* Cisco-HDLC (keepalives etc) */
72#endif
73
74/*
75 * Module parameters and associated variables
76 */
77int fst_txq_low = FST_LOW_WATER_MARK;
78int fst_txq_high = FST_HIGH_WATER_MARK;
79int fst_max_reads = 7;
80int fst_excluded_cards = 0;
81int fst_excluded_list[FST_MAX_CARDS];
82
83module_param(fst_txq_low, int, 0);
84module_param(fst_txq_high, int, 0);
85module_param(fst_max_reads, int, 0);
86module_param(fst_excluded_cards, int, 0);
87module_param_array(fst_excluded_list, int, NULL, 0);
88
89/* Card shared memory layout
90 * =========================
91 */
92#pragma pack(1)
93
94/* This information is derived in part from the FarSite FarSync Smc.h
95 * file. Unfortunately various name clashes and the non-portability of the
96 * bit field declarations in that file have meant that I have chosen to
97 * recreate the information here.
98 *
99 * The SMC (Shared Memory Configuration) has a version number that is
100 * incremented every time there is a significant change. This number can
101 * be used to check that we have not got out of step with the firmware
102 * contained in the .CDE files.
103 */
104#define SMC_VERSION 24
105
106#define FST_MEMSIZE 0x100000 /* Size of card memory (1Mb) */
107
108#define SMC_BASE 0x00002000L /* Base offset of the shared memory window main
109 * configuration structure */
110#define BFM_BASE 0x00010000L /* Base offset of the shared memory window DMA
111 * buffers */
112
113#define LEN_TX_BUFFER 8192 /* Size of packet buffers */
114#define LEN_RX_BUFFER 8192
115
116#define LEN_SMALL_TX_BUFFER 256 /* Size of obsolete buffs used for DOS diags */
117#define LEN_SMALL_RX_BUFFER 256
118
119#define NUM_TX_BUFFER 2 /* Must be power of 2. Fixed by firmware */
120#define NUM_RX_BUFFER 8
121
122/* Interrupt retry time in milliseconds */
123#define INT_RETRY_TIME 2
124
125/* The Am186CH/CC processors support a SmartDMA mode using circular pools
126 * of buffer descriptors. The structure is almost identical to that used
127 * in the LANCE Ethernet controllers. Details available as PDF from the
128 * AMD web site: http://www.amd.com/products/epd/processors/\
129 * 2.16bitcont/3.am186cxfa/a21914/21914.pdf
130 */
131struct txdesc { /* Transmit descriptor */
132 volatile u16 ladr; /* Low order address of packet. This is a
133 * linear address in the Am186 memory space
134 */
135 volatile u8 hadr; /* High order address. Low 4 bits only, high 4
136 * bits must be zero
137 */
138 volatile u8 bits; /* Status and config */
139 volatile u16 bcnt; /* 2s complement of packet size in low 15 bits.
140 * Transmit terminal count interrupt enable in
141 * top bit.
142 */
143 u16 unused; /* Not used in Tx */
144};
145
146struct rxdesc { /* Receive descriptor */
147 volatile u16 ladr; /* Low order address of packet */
148 volatile u8 hadr; /* High order address */
149 volatile u8 bits; /* Status and config */
150 volatile u16 bcnt; /* 2s complement of buffer size in low 15 bits.
151 * Receive terminal count interrupt enable in
152 * top bit.
153 */
154 volatile u16 mcnt; /* Message byte count (15 bits) */
155};
156
157/* Convert a length into the 15 bit 2's complement */
158/* #define cnv_bcnt(len) (( ~(len) + 1 ) & 0x7FFF ) */
159/* Since we need to set the high bit to enable the completion interrupt this
160 * can be made a lot simpler
161 */
162#define cnv_bcnt(len) (-(len))
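/* Illustration with an arbitrary length: cnv_bcnt(1500) stored in a u16
 * field is 0xFA24; the low 15 bits (0x7A24) are the 15-bit 2's complement
 * of 1500 and the top bit comes out set, which doubles as the terminal
 * count interrupt enable described above.
 */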
163
164/* Status and config bits for the above */
165#define DMA_OWN 0x80 /* SmartDMA owns the descriptor */
166#define TX_STP 0x02 /* Tx: start of packet */
167#define TX_ENP 0x01 /* Tx: end of packet */
168#define RX_ERR 0x40 /* Rx: error (OR of next 4 bits) */
169#define RX_FRAM 0x20 /* Rx: framing error */
170#define RX_OFLO 0x10 /* Rx: overflow error */
171#define RX_CRC 0x08 /* Rx: CRC error */
172#define RX_HBUF 0x04 /* Rx: buffer error */
173#define RX_STP 0x02 /* Rx: start of packet */
174#define RX_ENP 0x01 /* Rx: end of packet */
175
176/* Interrupts from the card are caused by various events which are presented
177 * in a circular buffer as several events may be processed on one physical int
178 */
179#define MAX_CIRBUFF 32
180
181struct cirbuff {
182 u8 rdindex; /* read, then increment and wrap */
183 u8 wrindex; /* write, then increment and wrap */
184 u8 evntbuff[MAX_CIRBUFF];
185};
186
187/* Interrupt event codes.
188 * Where appropriate the two low order bits indicate the port number
189 */
190#define CTLA_CHG 0x18 /* Control signal changed */
191#define CTLB_CHG 0x19
192#define CTLC_CHG 0x1A
193#define CTLD_CHG 0x1B
194
195#define INIT_CPLT 0x20 /* Initialisation complete */
196#define INIT_FAIL 0x21 /* Initialisation failed */
197
198#define ABTA_SENT 0x24 /* Abort sent */
199#define ABTB_SENT 0x25
200#define ABTC_SENT 0x26
201#define ABTD_SENT 0x27
202
203#define TXA_UNDF 0x28 /* Transmission underflow */
204#define TXB_UNDF 0x29
205#define TXC_UNDF 0x2A
206#define TXD_UNDF 0x2B
207
208#define F56_INT 0x2C
209#define M32_INT 0x2D
210
211#define TE1_ALMA 0x30
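/* Port decoding example for the codes above: an event byte of 0x2A is
 * TXC_UNDF, i.e. a transmit underflow on port 2, since 0x2A & 3 == 2.
 * CTLA_CHG..CTLD_CHG (0x18..0x1B) decode to ports 0..3 the same way.
 */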
212
213/* Port physical configuration. See farsync.h for field values */
214struct port_cfg {
215 u16 lineInterface; /* Physical interface type */
216 u8 x25op; /* Unused at present */
217 u8 internalClock; /* 1 => internal clock, 0 => external */
218 u8 transparentMode; /* 1 => on, 0 => off */
219 u8 invertClock; /* 0 => normal, 1 => inverted */
220 u8 padBytes[6]; /* Padding */
221 u32 lineSpeed; /* Speed in bps */
222};
223
224/* TE1 port physical configuration */
225struct su_config {
226 u32 dataRate;
227 u8 clocking;
228 u8 framing;
229 u8 structure;
230 u8 interface;
231 u8 coding;
232 u8 lineBuildOut;
233 u8 equalizer;
234 u8 transparentMode;
235 u8 loopMode;
236 u8 range;
237 u8 txBufferMode;
238 u8 rxBufferMode;
239 u8 startingSlot;
240 u8 losThreshold;
241 u8 enableIdleCode;
242 u8 idleCode;
243 u8 spare[44];
244};
245
246/* TE1 Status */
247struct su_status {
248 u32 receiveBufferDelay;
249 u32 framingErrorCount;
250 u32 codeViolationCount;
251 u32 crcErrorCount;
252 u32 lineAttenuation;
253 u8 portStarted;
254 u8 lossOfSignal;
255 u8 receiveRemoteAlarm;
256 u8 alarmIndicationSignal;
257 u8 spare[40];
258};
259
260/* Finally sling all the above together into the shared memory structure.
261 * Sorry it's a hodgepodge of arrays, structures and unused bits; it's been
262 * evolving under NT for some time so I guess we're stuck with it.
263 * The structure starts at offset SMC_BASE.
264 * See farsync.h for some field values.
265 */
266struct fst_shared {
267 /* DMA descriptor rings */
268 struct rxdesc rxDescrRing[FST_MAX_PORTS][NUM_RX_BUFFER];
269 struct txdesc txDescrRing[FST_MAX_PORTS][NUM_TX_BUFFER];
270
271 /* Obsolete small buffers */
272 u8 smallRxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_SMALL_RX_BUFFER];
273 u8 smallTxBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_SMALL_TX_BUFFER];
274
275 u8 taskStatus; /* 0x00 => initialising, 0x01 => running,
276 * 0xFF => halted
277 */
278
279 u8 interruptHandshake; /* Set to 0x01 by adapter to signal interrupt,
280 * set to 0xEE by host to acknowledge interrupt
281 */
282
283 u16 smcVersion; /* Must match SMC_VERSION */
284
285 u32 smcFirmwareVersion; /* 0xIIVVRRBB where II = product ID, VV = major
286 * version, RR = revision and BB = build
287 */
288
289 u16 txa_done; /* Obsolete completion flags */
290 u16 rxa_done;
291 u16 txb_done;
292 u16 rxb_done;
293 u16 txc_done;
294 u16 rxc_done;
295 u16 txd_done;
296 u16 rxd_done;
297
298 u16 mailbox[4]; /* Diagnostics mailbox. Not used */
299
300 struct cirbuff interruptEvent; /* interrupt causes */
301
302 u32 v24IpSts[FST_MAX_PORTS]; /* V.24 control input status */
303 u32 v24OpSts[FST_MAX_PORTS]; /* V.24 control output status */
304
305 struct port_cfg portConfig[FST_MAX_PORTS];
306
307 u16 clockStatus[FST_MAX_PORTS]; /* lsb: 0=> present, 1=> absent */
308
309 u16 cableStatus; /* lsb: 0=> present, 1=> absent */
310
311 u16 txDescrIndex[FST_MAX_PORTS]; /* transmit descriptor ring index */
312 u16 rxDescrIndex[FST_MAX_PORTS]; /* receive descriptor ring index */
313
314 u16 portMailbox[FST_MAX_PORTS][2]; /* command, modifier */
315 u16 cardMailbox[4]; /* Not used */
316
317 /* Number of times the card thinks the host has
318 * missed an interrupt by not acknowledging
319 * within 2 ms (I guess NT has problems)
320 */
321 u32 interruptRetryCount;
322
323 /* Driver private data used as an ID. We'll not
324 * use this as I'd rather keep such things
325 * in main memory rather than on the PCI bus
326 */
327 u32 portHandle[FST_MAX_PORTS];
328
329 /* Count of Tx underflows for stats */
330 u32 transmitBufferUnderflow[FST_MAX_PORTS];
331
332 /* Debounced V.24 control input status */
333 u32 v24DebouncedSts[FST_MAX_PORTS];
334
335 /* Adapter debounce timers. Don't touch */
336 u32 ctsTimer[FST_MAX_PORTS];
337 u32 ctsTimerRun[FST_MAX_PORTS];
338 u32 dcdTimer[FST_MAX_PORTS];
339 u32 dcdTimerRun[FST_MAX_PORTS];
340
341 u32 numberOfPorts; /* Number of ports detected at startup */
342
343 u16 _reserved[64];
344
345 u16 cardMode; /* Bit-mask to enable features:
346 * Bit 0: 1 enables LED identify mode
347 */
348
349 u16 portScheduleOffset;
350
351 struct su_config suConfig; /* TE1 Bits */
352 struct su_status suStatus;
353
354 u32 endOfSmcSignature; /* endOfSmcSignature MUST be the last member of
355 * the structure and marks the end of shared
356 * memory. Adapter code initializes it as
357 * END_SIG.
358 */
359};
360
361/* endOfSmcSignature value */
362#define END_SIG 0x12345678
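/* Sketch of the handshake documented at interruptHandshake above (accessor
 * calls are illustrative; FST_RDB/FST_WRB are defined further down): the
 * host notices the adapter's 0x01 via FST_RDB(card, interruptHandshake)
 * and acknowledges with FST_WRB(card, interruptHandshake, 0xEE).
 */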
363
364/* Mailbox values. (portMailbox) */
365#define NOP 0 /* No operation */
366#define ACK 1 /* Positive acknowledgement to PC driver */
367#define NAK 2 /* Negative acknowledgement to PC driver */
368#define STARTPORT 3 /* Start an HDLC port */
369#define STOPPORT 4 /* Stop an HDLC port */
370#define ABORTTX 5 /* Abort the transmitter for a port */
371#define SETV24O 6 /* Set V24 outputs */
372
373/* PLX Chip Register Offsets */
374#define CNTRL_9052 0x50 /* Control Register */
375#define CNTRL_9054 0x6c /* Control Register */
376
377#define INTCSR_9052 0x4c /* Interrupt control/status register */
378#define INTCSR_9054 0x68 /* Interrupt control/status register */
379
380/* 9054 DMA Registers */
381/*
382 * Note that we will be using DMA Channel 0 for copying rx data
383 * and Channel 1 for copying tx data
384 */
385#define DMAMODE0 0x80
386#define DMAPADR0 0x84
387#define DMALADR0 0x88
388#define DMASIZ0 0x8c
389#define DMADPR0 0x90
390#define DMAMODE1 0x94
391#define DMAPADR1 0x98
392#define DMALADR1 0x9c
393#define DMASIZ1 0xa0
394#define DMADPR1 0xa4
395#define DMACSR0 0xa8
396#define DMACSR1 0xa9
397#define DMAARB 0xac
398#define DMATHR 0xb0
399#define DMADAC0 0xb4
400#define DMADAC1 0xb8
401#define DMAMARBR 0xac
402
403#define FST_MIN_DMA_LEN 64
404#define FST_RX_DMA_INT 0x01
405#define FST_TX_DMA_INT 0x02
406#define FST_CARD_INT 0x04
407
408/* Larger buffers are positioned in memory at offset BFM_BASE */
409struct buf_window {
410 u8 txBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_TX_BUFFER];
411 u8 rxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_RX_BUFFER];
412};
413
414/* Calculate offset of a buffer object within the shared memory window */
415#define BUF_OFFSET(X) (BFM_BASE + offsetof(struct buf_window, X))
416
417#pragma pack()
418
419/* Device driver private information
420 * =================================
421 */
422/* Per port (line or channel) information
423 */
424struct fst_port_info {
425 struct net_device *dev; /* Device struct - must be first */
426 struct fst_card_info *card; /* Card we're associated with */
427 int index; /* Port index on the card */
428 int hwif; /* Line hardware (lineInterface copy) */
429 int run; /* Port is running */
430 int mode; /* Normal or FarSync raw */
431 int rxpos; /* Next Rx buffer to use */
432 int txpos; /* Next Tx buffer to use */
433 int txipos; /* Next Tx buffer to check for free */
434 int start; /* Indication of start/stop to network */
435 /*
436 * A sixteen entry transmit queue
437 */
438 int txqs; /* index to get next buffer to tx */
439 int txqe; /* index to queue next packet */
440 struct sk_buff *txq[FST_TXQ_DEPTH]; /* The queue */
441 int rxqdepth;
442};
443
444/* Per card information
445 */
446struct fst_card_info {
447 char __iomem *mem; /* Card memory mapped to kernel space */
448 char __iomem *ctlmem; /* Control memory for PCI cards */
449 unsigned int phys_mem; /* Physical memory window address */
450 unsigned int phys_ctlmem; /* Physical control memory address */
451 unsigned int irq; /* Interrupt request line number */
452 unsigned int nports; /* Number of serial ports */
453 unsigned int type; /* Type index of card */
454 unsigned int state; /* State of card */
455 spinlock_t card_lock; /* Lock for SMP access */
456 unsigned short pci_conf; /* PCI card config in I/O space */
457 /* Per port info */
458 struct fst_port_info ports[FST_MAX_PORTS];
459 struct pci_dev *device; /* Information about the pci device */
460 int card_no; /* Inst of the card on the system */
461 int family; /* TxP or TxU */
462 int dmarx_in_progress;
463 int dmatx_in_progress;
464 unsigned long int_count;
465 unsigned long int_time_ave;
466 void *rx_dma_handle_host;
467 dma_addr_t rx_dma_handle_card;
468 void *tx_dma_handle_host;
469 dma_addr_t tx_dma_handle_card;
470 struct sk_buff *dma_skb_rx;
471 struct fst_port_info *dma_port_rx;
472 struct fst_port_info *dma_port_tx;
473 int dma_len_rx;
474 int dma_len_tx;
475 int dma_txpos;
476 int dma_rxpos;
477};
478
479/* Convert an HDLC device pointer into a port info pointer and similar */
480#define dev_to_port(D) (dev_to_hdlc(D)->priv)
481#define port_to_dev(P) ((P)->dev)
482
483
484/*
485 * Shared memory window access macros
486 *
487 * We have a nice memory based structure above, which could be directly
488 * mapped on i386 but might not work on other architectures unless we use
489 * the readb,w,l and writeb,w,l macros. Unfortunately these macros take
490 * physical offsets so we have to convert. The only saving grace is that
491 * this should all collapse back to a simple indirection eventually.
492 */
493#define WIN_OFFSET(X) ((long)&(((struct fst_shared *)SMC_BASE)->X))
494
495#define FST_RDB(C,E) readb ((C)->mem + WIN_OFFSET(E))
496#define FST_RDW(C,E) readw ((C)->mem + WIN_OFFSET(E))
497#define FST_RDL(C,E) readl ((C)->mem + WIN_OFFSET(E))
498
499#define FST_WRB(C,E,B) writeb ((B), (C)->mem + WIN_OFFSET(E))
500#define FST_WRW(C,E,W) writew ((W), (C)->mem + WIN_OFFSET(E))
501#define FST_WRL(C,E,L) writel ((L), (C)->mem + WIN_OFFSET(E))
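/* For instance, the FST_WRB(card, txDescrRing[...][...].bits, ...) call in
 * fst_tx_dma_complete() below resolves to a writeb() at
 * card->mem + SMC_BASE + offsetof(struct fst_shared, txDescrRing...bits):
 * WIN_OFFSET() fakes a struct pointer at SMC_BASE purely so the compiler
 * performs the offset arithmetic.
 */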
502
503/*
504 * Debug support
505 */
506#if FST_DEBUG
507
508static int fst_debug_mask = { FST_DEBUG };
509
510/* Most common debug activity is to print something if the corresponding bit
511 * is set in the debug mask. Note: this uses a non-ANSI extension in GCC to
512 * support variable numbers of macro parameters. The inverted if prevents us
513 * from eating someone else's else clause.
514 */
515#define dbg(F,fmt,A...) if ( ! ( fst_debug_mask & (F))) \
516 ; \
517 else \
518 printk ( KERN_DEBUG FST_NAME ": " fmt, ## A )
519
520#else
521#define dbg(X...) /* NOP */
522#endif
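/* A short sketch of why the inversion matters (names are made up):
 *
 *	if (port_ok)
 *		dbg(DBG_TX, "port %d ok\n", i);
 *	else
 *		complain();
 *
 * Because dbg() expands to a complete if/else of its own, the "else" above
 * still binds to "if (port_ok)" instead of being captured by the macro's
 * internal if, as it would be with a naive "if (mask & F) printk(...)" macro.
 */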
523
524/* Printing short cuts
525 */
526#define printk_err(fmt,A...) printk ( KERN_ERR FST_NAME ": " fmt, ## A )
527#define printk_warn(fmt,A...) printk ( KERN_WARNING FST_NAME ": " fmt, ## A )
528#define printk_info(fmt,A...) printk ( KERN_INFO FST_NAME ": " fmt, ## A )
529
530/*
531 * PCI ID lookup table
532 */
533static struct pci_device_id fst_pci_dev_id[] __devinitdata = {
534 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
535 PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
536
537 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
538 PCI_ANY_ID, 0, 0, FST_TYPE_T4P},
539
540 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
541 PCI_ANY_ID, 0, 0, FST_TYPE_T1U},
542
543 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
544 PCI_ANY_ID, 0, 0, FST_TYPE_T2U},
545
546 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
547 PCI_ANY_ID, 0, 0, FST_TYPE_T4U},
548
549 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
550 PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
551
552 {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
553 PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
554 {0,} /* End */
555};
556
557MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
558
559/*
560 * Device Driver Work Queues
561 *
562 * So that we don't spend too much time processing events in the
563 * Interrupt Service routine, we will declare a work queue per Card
564 * and make the ISR schedule a task in the queue for later execution.
565 * In the 2.4 kernel we used to use the immediate queue for BHs.
566 * Now that they are gone, tasklets seem to be much better than work
567 * queues.
568 */
569
570static void do_bottom_half_tx(struct fst_card_info *card);
571static void do_bottom_half_rx(struct fst_card_info *card);
572static void fst_process_tx_work_q(unsigned long work_q);
573static void fst_process_int_work_q(unsigned long work_q);
574
575DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
576DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
577
578struct fst_card_info *fst_card_array[FST_MAX_CARDS];
579spinlock_t fst_work_q_lock;
580u64 fst_work_txq;
581u64 fst_work_intq;
582
583static void
584fst_q_work_item(u64 * queue, int card_index)
585{
586 unsigned long flags;
587 u64 mask;
588
589 /*
590 * Grab the queue exclusively
591 */
592 spin_lock_irqsave(&fst_work_q_lock, flags);
593
594 /*
595 * Making an entry in the queue is simply a matter of setting
596 * a bit for the card indicating that there is work to do in the
597 * bottom half for the card. Note the limitation of 64 cards.
598 * That ought to be enough
599 */
600 mask = 1ULL << card_index; /* 64-bit literal: a plain 1 << 31 would overflow int */
601 *queue |= mask;
602 spin_unlock_irqrestore(&fst_work_q_lock, flags);
603}
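/* Usage sketch with an arbitrary index: fst_q_work_item(&fst_work_txq, 3)
 * sets bit 3 of the queue word, so the next run of fst_process_tx_work_q()
 * finds that bit and calls do_bottom_half_tx(fst_card_array[3]).
 */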
604
605static void
606fst_process_tx_work_q(unsigned long /*void **/work_q)
607{
608 unsigned long flags;
609 u64 work_txq;
610 int i;
611
612 /*
613 * Grab the queue exclusively
614 */
615 dbg(DBG_TX, "fst_process_tx_work_q\n");
616 spin_lock_irqsave(&fst_work_q_lock, flags);
617 work_txq = fst_work_txq;
618 fst_work_txq = 0;
619 spin_unlock_irqrestore(&fst_work_q_lock, flags);
620
621 /*
622 * Call the bottom half for each card with work waiting
623 */
624 for (i = 0; i < FST_MAX_CARDS; i++) {
625 if (work_txq & 0x01) {
626 if (fst_card_array[i] != NULL) {
627 dbg(DBG_TX, "Calling tx bh for card %d\n", i);
628 do_bottom_half_tx(fst_card_array[i]);
629 }
630 }
631 work_txq = work_txq >> 1;
632 }
633}
634
635static void
636fst_process_int_work_q(unsigned long /*void **/work_q)
637{
638 unsigned long flags;
639 u64 work_intq;
640 int i;
641
642 /*
643 * Grab the queue exclusively
644 */
645 dbg(DBG_INTR, "fst_process_int_work_q\n");
646 spin_lock_irqsave(&fst_work_q_lock, flags);
647 work_intq = fst_work_intq;
648 fst_work_intq = 0;
649 spin_unlock_irqrestore(&fst_work_q_lock, flags);
650
651 /*
652 * Call the bottom half for each card with work waiting
653 */
654 for (i = 0; i < FST_MAX_CARDS; i++) {
655 if (work_intq & 0x01) {
656 if (fst_card_array[i] != NULL) {
657 dbg(DBG_INTR,
658 "Calling rx & tx bh for card %d\n", i);
659 do_bottom_half_rx(fst_card_array[i]);
660 do_bottom_half_tx(fst_card_array[i]);
661 }
662 }
663 work_intq = work_intq >> 1;
664 }
665}
666
667/* Card control functions
668 * ======================
669 */
670/* Place the processor in reset state
671 *
672 * Used to be a simple write to card control space but a glitch in the latest
673 * AMD Am186CH processor means that we now have to do it by asserting and de-
674 * asserting the PLX chip PCI Adapter Software Reset. Bit 30 in CNTRL register
675 * at offset CNTRL_9052. Note the updates for the TXU.
676 */
677static inline void
678fst_cpureset(struct fst_card_info *card)
679{
680 unsigned char interrupt_line_register;
681 unsigned long j = jiffies + 1;
682 unsigned int regval;
683
684 if (card->family == FST_FAMILY_TXU) {
685 if (pci_read_config_byte
686 (card->device, PCI_INTERRUPT_LINE, &interrupt_line_register)) {
687 dbg(DBG_ASS,
688 "Error in reading interrupt line register\n");
689 }
690 /*
691 * Assert PLX software reset and Am186 hardware reset
692 * and then deassert the PLX software reset, keeping the 186 in reset
693 */
694 outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
695 outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
696 /*
697 * We are delaying here to allow the 9054 to reset itself
698 */
699 j = jiffies + 1;
700 while (jiffies < j)
701 /* Do nothing */ ;
702 outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
703 /*
704 * We are delaying here to allow the 9054 to reload its eeprom
705 */
706 j = jiffies + 1;
707 while (jiffies < j)
708 /* Do nothing */ ;
709 outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
710
711 if (pci_write_config_byte
712 (card->device, PCI_INTERRUPT_LINE, interrupt_line_register)) {
713 dbg(DBG_ASS,
714 "Error in writing interrupt line register\n");
715 }
716
717 } else {
718 regval = inl(card->pci_conf + CNTRL_9052);
719
720 outl(regval | 0x40000000, card->pci_conf + CNTRL_9052);
721 outl(regval & ~0x40000000, card->pci_conf + CNTRL_9052);
722 }
723}
724
725/* Release the processor from reset
726 */
727static inline void
728fst_cpurelease(struct fst_card_info *card)
729{
730 if (card->family == FST_FAMILY_TXU) {
731 /*
732 * Force posted writes to complete
733 */
734 (void) readb(card->mem);
735
736 /*
737 * Release LRESET DO = 1
738 * Then release Local Hold, DO = 1
739 */
740 outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
741 outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
742 } else {
743 (void) readb(card->ctlmem);
744 }
745}
746
747/* Clear the cards interrupt flag
748 */
749static inline void
750fst_clear_intr(struct fst_card_info *card)
751{
752 if (card->family == FST_FAMILY_TXU) {
753 (void) readb(card->ctlmem);
754 } else {
755 /* Poke the appropriate PLX chip register (same as enabling interrupts)
756 */
757 outw(0x0543, card->pci_conf + INTCSR_9052);
758 }
759}
760
761/* Enable card interrupts
762 */
763static inline void
764fst_enable_intr(struct fst_card_info *card)
765{
766 if (card->family == FST_FAMILY_TXU) {
767 outl(0x0f0c0900, card->pci_conf + INTCSR_9054);
768 } else {
769 outw(0x0543, card->pci_conf + INTCSR_9052);
770 }
771}
772
773/* Disable card interrupts
774 */
775static inline void
776fst_disable_intr(struct fst_card_info *card)
777{
778 if (card->family == FST_FAMILY_TXU) {
779 outl(0x00000000, card->pci_conf + INTCSR_9054);
780 } else {
781 outw(0x0000, card->pci_conf + INTCSR_9052);
782 }
783}
784
785/* Process the result of trying to pass a received frame up the stack
786 */
787static void
788fst_process_rx_status(int rx_status, char *name)
789{
790 switch (rx_status) {
791 case NET_RX_SUCCESS:
792 {
793 /*
794 * Nothing to do here
795 */
796 break;
797 }
798
799 case NET_RX_CN_LOW:
800 {
801 dbg(DBG_ASS, "%s: Receive Low Congestion\n", name);
802 break;
803 }
804
805 case NET_RX_CN_MOD:
806 {
807 dbg(DBG_ASS, "%s: Receive Moderate Congestion\n", name);
808 break;
809 }
810
811 case NET_RX_CN_HIGH:
812 {
813 dbg(DBG_ASS, "%s: Receive High Congestion\n", name);
814 break;
815 }
816
817 case NET_RX_DROP:
818 {
819 dbg(DBG_ASS, "%s: Received packet dropped\n", name);
820 break;
821 }
822 }
823}
824
825/* Initialise DMA for PLX 9054
826 */
827static inline void
828fst_init_dma(struct fst_card_info *card)
829{
830 /*
831 * This is only required for the PLX 9054
832 */
833 if (card->family == FST_FAMILY_TXU) {
834 pci_set_master(card->device);
835 outl(0x00020441, card->pci_conf + DMAMODE0);
836 outl(0x00020441, card->pci_conf + DMAMODE1);
837 outl(0x0, card->pci_conf + DMATHR);
838 }
839}
840
841/* Tx dma complete interrupt
842 */
843static void
844fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
845 int len, int txpos)
846{
847 struct net_device *dev = port_to_dev(port);
848 struct net_device_stats *stats = hdlc_stats(dev);
849
850 /*
851 * Everything is now set, just tell the card to go
852 */
853 dbg(DBG_TX, "fst_tx_dma_complete\n");
854 FST_WRB(card, txDescrRing[port->index][txpos].bits,
855 DMA_OWN | TX_STP | TX_ENP);
856 stats->tx_packets++;
857 stats->tx_bytes += len;
858 dev->trans_start = jiffies;
859}
860
861/*
862 * Mark it for our own raw sockets interface
863 */
864static unsigned short farsync_type_trans(struct sk_buff *skb,
865 struct net_device *dev)
866{
867 skb->dev = dev;
868 skb->mac.raw = skb->data;
869 skb->pkt_type = PACKET_HOST;
870 return htons(ETH_P_CUST);
871}
872
873/* Rx dma complete interrupt
874 */
875static void
876fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
877 int len, struct sk_buff *skb, int rxp)
878{
879 struct net_device *dev = port_to_dev(port);
880 struct net_device_stats *stats = hdlc_stats(dev);
881 int pi;
882 int rx_status;
883
884 dbg(DBG_TX, "fst_rx_dma_complete\n");
885 pi = port->index;
886 memcpy(skb_put(skb, len), card->rx_dma_handle_host, len);
887
888 /* Reset buffer descriptor */
889 FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
890
891 /* Update stats */
892 stats->rx_packets++;
893 stats->rx_bytes += len;
894
895 /* Push upstream */
896 dbg(DBG_RX, "Pushing the frame up the stack\n");
897 if (port->mode == FST_RAW)
898 skb->protocol = farsync_type_trans(skb, dev);
899 else
900 skb->protocol = hdlc_type_trans(skb, dev);
901 rx_status = netif_rx(skb);
902 fst_process_rx_status(rx_status, port_to_dev(port)->name);
903 if (rx_status == NET_RX_DROP)
904 stats->rx_dropped++;
905 dev->last_rx = jiffies;
906}
907
908/*
909 * Receive a frame through the DMA
910 */
911static inline void
912fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
913 unsigned char *mem, int len)
914{
915 /*
916 * This routine will setup the DMA and start it
917 */
918
919 dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
920 if (card->dmarx_in_progress) {
921 dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
922 }
923
924 outl((unsigned long) skb, card->pci_conf + DMAPADR0); /* Copy to here */
925 outl((unsigned long) mem, card->pci_conf + DMALADR0); /* from here */
926 outl(len, card->pci_conf + DMASIZ0); /* for this length */
927 outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
928
929 /*
930 * We use the dmarx_in_progress flag to flag the channel as busy
931 */
932 card->dmarx_in_progress = 1;
933 outb(0x03, card->pci_conf + DMACSR0); /* Start the transfer */
934}
935
936/*
937 * Send a frame through the DMA
938 */
939static inline void
940fst_tx_dma(struct fst_card_info *card, unsigned char *skb,
941 unsigned char *mem, int len)
942{
943 /*
944 * This routine will setup the DMA and start it.
945 */
946
947 dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len);
948 if (card->dmatx_in_progress) {
949 dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
950 }
951
952 outl((unsigned long) skb, card->pci_conf + DMAPADR1); /* Copy from here */
953 outl((unsigned long) mem, card->pci_conf + DMALADR1); /* to here */
954 outl(len, card->pci_conf + DMASIZ1); /* for this length */
955 outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */
956
957 /*
958 * We use the dmatx_in_progress to flag the channel as busy
959 */
960 card->dmatx_in_progress = 1;
961 outb(0x03, card->pci_conf + DMACSR1); /* Start the transfer */
962}
963
964/* Issue a Mailbox command for a port.
965 * Note we issue them on a fire and forget basis, not expecting to see an
966 * error and not waiting for completion.
967 */
968static void
969fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
970{
971 struct fst_card_info *card;
972 unsigned short mbval;
973 unsigned long flags;
974 int safety;
975
976 card = port->card;
977 spin_lock_irqsave(&card->card_lock, flags);
978 mbval = FST_RDW(card, portMailbox[port->index][0]);
979
980 safety = 0;
981 /* Wait for any previous command to complete */
982 while (mbval > NAK) {
983 spin_unlock_irqrestore(&card->card_lock, flags);
984 schedule_timeout(1);
985 spin_lock_irqsave(&card->card_lock, flags);
986
987 if (++safety > 2000) {
988 printk_err("Mailbox safety timeout\n");
989 break;
990 }
991
992 mbval = FST_RDW(card, portMailbox[port->index][0]);
993 }
994 if (safety > 0) {
995 dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety);
996 }
997 if (mbval == NAK) {
998 dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n");
999 }
1000
1001 FST_WRW(card, portMailbox[port->index][0], cmd);
1002
1003 if (cmd == ABORTTX || cmd == STARTPORT) {
1004 port->txpos = 0;
1005 port->txipos = 0;
1006 port->start = 0;
1007 }
1008
1009 spin_unlock_irqrestore(&card->card_lock, flags);
1010}
1011
1012/* Port output signals control
1013 */
1014static inline void
1015fst_op_raise(struct fst_port_info *port, unsigned int outputs)
1016{
1017 outputs |= FST_RDL(port->card, v24OpSts[port->index]);
1018 FST_WRL(port->card, v24OpSts[port->index], outputs);
1019
1020 if (port->run)
1021 fst_issue_cmd(port, SETV24O);
1022}
1023
1024static inline void
1025fst_op_lower(struct fst_port_info *port, unsigned int outputs)
1026{
1027 outputs = ~outputs & FST_RDL(port->card, v24OpSts[port->index]);
1028 FST_WRL(port->card, v24OpSts[port->index], outputs);
1029
1030 if (port->run)
1031 fst_issue_cmd(port, SETV24O);
1032}
1033
1034/*
1035 * Setup port Rx buffers
1036 */
1037static void
1038fst_rx_config(struct fst_port_info *port)
1039{
1040 int i;
1041 int pi;
1042 unsigned int offset;
1043 unsigned long flags;
1044 struct fst_card_info *card;
1045
1046 pi = port->index;
1047 card = port->card;
1048 spin_lock_irqsave(&card->card_lock, flags);
1049 for (i = 0; i < NUM_RX_BUFFER; i++) {
1050 offset = BUF_OFFSET(rxBuffer[pi][i][0]);
1051
1052 FST_WRW(card, rxDescrRing[pi][i].ladr, (u16) offset);
1053 FST_WRB(card, rxDescrRing[pi][i].hadr, (u8) (offset >> 16));
1054 FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER));
1055 FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER);
1056 FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN);
1057 }
1058 port->rxpos = 0;
1059 spin_unlock_irqrestore(&card->card_lock, flags);
1060}
1061
1062/*
1063 * Setup port Tx buffers
1064 */
1065static void
1066fst_tx_config(struct fst_port_info *port)
1067{
1068 int i;
1069 int pi;
1070 unsigned int offset;
1071 unsigned long flags;
1072 struct fst_card_info *card;
1073
1074 pi = port->index;
1075 card = port->card;
1076 spin_lock_irqsave(&card->card_lock, flags);
1077 for (i = 0; i < NUM_TX_BUFFER; i++) {
1078 offset = BUF_OFFSET(txBuffer[pi][i][0]);
1079
1080 FST_WRW(card, txDescrRing[pi][i].ladr, (u16) offset);
1081 FST_WRB(card, txDescrRing[pi][i].hadr, (u8) (offset >> 16));
1082 FST_WRW(card, txDescrRing[pi][i].bcnt, 0);
1083 FST_WRB(card, txDescrRing[pi][i].bits, 0);
1084 }
1085 port->txpos = 0;
1086 port->txipos = 0;
1087 port->start = 0;
1088 spin_unlock_irqrestore(&card->card_lock, flags);
1089}
1090
1091/* TE1 Alarm change interrupt event
1092 */
1093static void
1094fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port)
1095{
1096 u8 los;
1097 u8 rra;
1098 u8 ais;
1099
1100 los = FST_RDB(card, suStatus.lossOfSignal);
1101 rra = FST_RDB(card, suStatus.receiveRemoteAlarm);
1102 ais = FST_RDB(card, suStatus.alarmIndicationSignal);
1103
1104 if (los) {
1105 /*
1106 * Lost the link
1107 */
1108 if (netif_carrier_ok(port_to_dev(port))) {
1109 dbg(DBG_INTR, "Net carrier off\n");
1110 netif_carrier_off(port_to_dev(port));
1111 }
1112 } else {
1113 /*
1114 * Link available
1115 */
1116 if (!netif_carrier_ok(port_to_dev(port))) {
1117 dbg(DBG_INTR, "Net carrier on\n");
1118 netif_carrier_on(port_to_dev(port));
1119 }
1120 }
1121
1122 if (los)
1123 dbg(DBG_INTR, "Assert LOS Alarm\n");
1124 else
1125 dbg(DBG_INTR, "De-assert LOS Alarm\n");
1126 if (rra)
1127 dbg(DBG_INTR, "Assert RRA Alarm\n");
1128 else
1129 dbg(DBG_INTR, "De-assert RRA Alarm\n");
1130
1131 if (ais)
1132 dbg(DBG_INTR, "Assert AIS Alarm\n");
1133 else
1134 dbg(DBG_INTR, "De-assert AIS Alarm\n");
1135}
1136
1137/* Control signal change interrupt event
1138 */
1139static void
1140fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port)
1141{
1142 int signals;
1143
1144 signals = FST_RDL(card, v24DebouncedSts[port->index]);
1145
1146 if (signals & (((port->hwif == X21) || (port->hwif == X21D))
1147 ? IPSTS_INDICATE : IPSTS_DCD)) {
1148 if (!netif_carrier_ok(port_to_dev(port))) {
1149 dbg(DBG_INTR, "DCD active\n");
1150 netif_carrier_on(port_to_dev(port));
1151 }
1152 } else {
1153 if (netif_carrier_ok(port_to_dev(port))) {
1154 dbg(DBG_INTR, "DCD lost\n");
1155 netif_carrier_off(port_to_dev(port));
1156 }
1157 }
1158}
1159
1160/* Log Rx Errors
1161 */
1162static void
1163fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port,
1164 unsigned char dmabits, int rxp, unsigned short len)
1165{
1166 struct net_device *dev = port_to_dev(port);
1167 struct net_device_stats *stats = hdlc_stats(dev);
1168
1169 /*
1170 * Increment the appropriate error counter
1171 */
1172 stats->rx_errors++;
1173 if (dmabits & RX_OFLO) {
1174 stats->rx_fifo_errors++;
1175 dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n",
1176 card->card_no, port->index, rxp);
1177 }
1178 if (dmabits & RX_CRC) {
1179 stats->rx_crc_errors++;
1180 dbg(DBG_ASS, "Rx crc error on card %d port %d\n",
1181 card->card_no, port->index);
1182 }
1183 if (dmabits & RX_FRAM) {
1184 stats->rx_frame_errors++;
1185 dbg(DBG_ASS, "Rx frame error on card %d port %d\n",
1186 card->card_no, port->index);
1187 }
1188 if (dmabits == (RX_STP | RX_ENP)) {
1189 stats->rx_length_errors++;
1190 dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n",
1191 len, card->card_no, port->index);
1192 }
1193}
1194
1195/* Rx Error Recovery
1196 */
1197static void
1198fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
1199 unsigned char dmabits, int rxp, unsigned short len)
1200{
1201 int i;
1202 int pi;
1203
1204 pi = port->index;
1205 /*
1206 * Discard buffer descriptors until we see the start of the
1207 * next frame. Note that for long frames this could be in
1208 * a subsequent interrupt.
1209 */
1210 i = 0;
1211 while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
1212 FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
1213 rxp = (rxp+1) % NUM_RX_BUFFER;
1214 if (++i > NUM_RX_BUFFER) {
1215 dbg(DBG_ASS, "intr_rx: Discarding more bufs"
1216 " than we have\n");
1217 break;
1218 }
1219 dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
1220 dbg(DBG_ASS, "DMA Bits of next buffer was %x\n", dmabits);
1221 }
1222 dbg(DBG_ASS, "There were %d subsequent buffers in error\n", i);
1223
1224 /* Discard the terminal buffer */
1225 if (!(dmabits & DMA_OWN)) {
1226 FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
1227 rxp = (rxp+1) % NUM_RX_BUFFER;
1228 }
1229 port->rxpos = rxp;
1230 return;
1231
1232}
1233
1234/* Rx complete interrupt
1235 */
1236static void
1237fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
1238{
1239 unsigned char dmabits;
1240 int pi;
1241 int rxp;
1242 int rx_status;
1243 unsigned short len;
1244 struct sk_buff *skb;
1245 struct net_device *dev = port_to_dev(port);
1246 struct net_device_stats *stats = hdlc_stats(dev);
1247
1248 /* Check we have a buffer to process */
1249 pi = port->index;
1250 rxp = port->rxpos;
1251 dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
1252 if (dmabits & DMA_OWN) {
1253 dbg(DBG_RX | DBG_INTR, "intr_rx: No buffer port %d pos %d\n",
1254 pi, rxp);
1255 return;
1256 }
1257 if (card->dmarx_in_progress) {
1258 return;
1259 }
1260
1261 /* Get buffer length */
1262 len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt);
1263 /* Discard the CRC */
1264 len -= 2;
1265 if (len == 0) {
1266 /*
1267 * This seems to happen on the TE1 interface sometimes
1268 * so throw the frame away and log the event.
1269 */
1270 printk_err("Frame received with 0 length. Card %d Port %d\n",
1271 card->card_no, port->index);
1272 /* Return descriptor to card */
1273 FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
1274
1275 rxp = (rxp+1) % NUM_RX_BUFFER;
1276 port->rxpos = rxp;
1277 return;
1278 }
1279
1280 /* Check buffer length and for other errors. We insist on one packet
1281 * in one buffer. This simplifies things greatly and since we've
1282 * allocated 8K it shouldn't be a real world limitation
1283 */
1284 dbg(DBG_RX, "intr_rx: %d,%d: flags %x len %d\n", pi, rxp, dmabits, len);
1285 if (dmabits != (RX_STP | RX_ENP) || len > LEN_RX_BUFFER - 2) {
1286 fst_log_rx_error(card, port, dmabits, rxp, len);
1287 fst_recover_rx_error(card, port, dmabits, rxp, len);
1288 return;
1289 }
1290
1291 /* Allocate SKB */
1292 if ((skb = dev_alloc_skb(len)) == NULL) {
1293 dbg(DBG_RX, "intr_rx: can't allocate buffer\n");
1294
1295 stats->rx_dropped++;
1296
1297 /* Return descriptor to card */
1298 FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
1299
1300 rxp = (rxp+1) % NUM_RX_BUFFER;
1301 port->rxpos = rxp;
1302 return;
1303 }
1304
1305 /*
1306 * We know the length we need to receive, len.
1307 * It's not worth using the DMA for reads of less than
1308 * FST_MIN_DMA_LEN
1309 */
1310
1311 if ((len < FST_MIN_DMA_LEN) || (card->family == FST_FAMILY_TXP)) {
1312 memcpy_fromio(skb_put(skb, len),
1313 card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]),
1314 len);
1315
1316 /* Reset buffer descriptor */
1317 FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
1318
1319 /* Update stats */
1320 stats->rx_packets++;
1321 stats->rx_bytes += len;
1322
1323 /* Push upstream */
1324 dbg(DBG_RX, "Pushing frame up the stack\n");
1325 if (port->mode == FST_RAW)
1326 skb->protocol = farsync_type_trans(skb, dev);
1327 else
1328 skb->protocol = hdlc_type_trans(skb, dev);
1329 rx_status = netif_rx(skb);
1330 fst_process_rx_status(rx_status, port_to_dev(port)->name);
1331 if (rx_status == NET_RX_DROP) {
1332 stats->rx_dropped++;
1333 }
1334 dev->last_rx = jiffies;
1335 } else {
1336 card->dma_skb_rx = skb;
1337 card->dma_port_rx = port;
1338 card->dma_len_rx = len;
1339 card->dma_rxpos = rxp;
1340 fst_rx_dma(card, (char *) card->rx_dma_handle_card,
1341 (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
1342 }
1343 if (rxp != port->rxpos) {
1344 dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
1345 dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
1346 }
1347 rxp = (rxp+1) % NUM_RX_BUFFER;
1348 port->rxpos = rxp;
1349}
1350
1351/*
1352 * The bottom halves of the ISR
1353 *
1354 */
1355
1356static void
1357do_bottom_half_tx(struct fst_card_info *card)
1358{
1359 struct fst_port_info *port;
1360 int pi;
1361 int txq_length;
1362 struct sk_buff *skb;
1363 unsigned long flags;
1364 struct net_device *dev;
1365 struct net_device_stats *stats;
1366
1367 /*
1368 * Find a free buffer for the transmit
1369 * Step through each port on this card
1370 */
1371
1372 dbg(DBG_TX, "do_bottom_half_tx\n");
1373 for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
1374 if (!port->run)
1375 continue;
1376
1377 dev = port_to_dev(port);
1378 stats = hdlc_stats(dev);
1379 while (!
1380 (FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
1381 DMA_OWN)
1382 && !(card->dmatx_in_progress)) {
1383 /*
1384 * There doesn't seem to be a txdone event per-se
1385 * We seem to have to deduce it, by checking the DMA_OWN
1386 * bit on the next buffer we think we can use
1387 */
1388 spin_lock_irqsave(&card->card_lock, flags);
1389 if ((txq_length = port->txqe - port->txqs) < 0) {
1390 /*
1391 * This is the case where one has wrapped and the
1392 * maths gives us a negative number
1393 */
1394 txq_length = txq_length + FST_TXQ_DEPTH;
1395 }
1396 spin_unlock_irqrestore(&card->card_lock, flags);
1397 if (txq_length > 0) {
1398 /*
1399 * There is something to send
1400 */
1401 spin_lock_irqsave(&card->card_lock, flags);
1402 skb = port->txq[port->txqs];
1403 port->txqs++;
1404 if (port->txqs == FST_TXQ_DEPTH) {
1405 port->txqs = 0;
1406 }
1407 spin_unlock_irqrestore(&card->card_lock, flags);
1408 /*
1409 * copy the data and set the required indicators on the
1410 * card.
1411 */
1412 FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
1413 cnv_bcnt(skb->len));
1414 if ((skb->len < FST_MIN_DMA_LEN)
1415 || (card->family == FST_FAMILY_TXP)) {
1416 /* Enqueue the packet with normal io */
1417 memcpy_toio(card->mem +
1418 BUF_OFFSET(txBuffer[pi]
1419 [port->
1420 txpos][0]),
1421 skb->data, skb->len);
1422 FST_WRB(card,
1423 txDescrRing[pi][port->txpos].
1424 bits,
1425 DMA_OWN | TX_STP | TX_ENP);
1426 stats->tx_packets++;
1427 stats->tx_bytes += skb->len;
1428 dev->trans_start = jiffies;
1429 } else {
1430 /* Or do it through dma */
1431 memcpy(card->tx_dma_handle_host,
1432 skb->data, skb->len);
1433 card->dma_port_tx = port;
1434 card->dma_len_tx = skb->len;
1435 card->dma_txpos = port->txpos;
1436 fst_tx_dma(card,
1437 (char *) card->
1438 tx_dma_handle_card,
1439 (char *)
1440 BUF_OFFSET(txBuffer[pi]
1441 [port->txpos][0]),
1442 skb->len);
1443 }
1444 if (++port->txpos >= NUM_TX_BUFFER)
1445 port->txpos = 0;
1446 /*
1447 * If we have flow control on, can we now release it?
1448 */
1449 if (port->start) {
1450 if (txq_length < fst_txq_low) {
1451 netif_wake_queue(port_to_dev
1452 (port));
1453 port->start = 0;
1454 }
1455 }
1456 dev_kfree_skb(skb);
1457 } else {
1458 /*
1459 * Nothing to send so break out of the while loop
1460 */
1461 break;
1462 }
1463 }
1464 }
1465}
1466
1467static void
1468do_bottom_half_rx(struct fst_card_info *card)
1469{
1470 struct fst_port_info *port;
1471 int pi;
1472 int rx_count = 0;
1473
1474 /* Check for rx completions on all ports on this card */
1475 dbg(DBG_RX, "do_bottom_half_rx\n");
1476 for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
1477 if (!port->run)
1478 continue;
1479
1480 while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
1481 & DMA_OWN) && !(card->dmarx_in_progress)) {
1482 if (rx_count > fst_max_reads) {
1483 /*
1484 * Don't spend forever in receive processing
1485 * Schedule another event
1486 */
1487 fst_q_work_item(&fst_work_intq, card->card_no);
1488 tasklet_schedule(&fst_int_task);
1489 break; /* Leave the loop */
1490 }
1491 fst_intr_rx(card, port);
1492 rx_count++;
1493 }
1494 }
1495}
1496
1497/*
1498 * The interrupt service routine
1499 * Dev_id is our fst_card_info pointer
1500 */
1501irqreturn_t
1502fst_intr(int irq, void *dev_id, struct pt_regs *regs)
1503{
1504 struct fst_card_info *card;
1505 struct fst_port_info *port;
1506 int rdidx; /* Event buffer indices */
1507 int wridx;
1508 int event; /* Actual event for processing */
1509 unsigned int dma_intcsr = 0;
1510 unsigned int do_card_interrupt;
1511 unsigned int int_retry_count;
1512
1513 if ((card = dev_id) == NULL) {
1514 dbg(DBG_INTR, "intr: spurious %d\n", irq);
1515 return IRQ_NONE;
1516 }
1517
1518 /*
1519 * Check to see if the interrupt was for this card
1520 * return if not
1521 * Note that the call to clear the interrupt is important
1522 */
1523 dbg(DBG_INTR, "intr: %d %p\n", irq, card);
1524 if (card->state != FST_RUNNING) {
1525 printk_err
1526 ("Interrupt received for card %d in a non running state (%d)\n",
1527 card->card_no, card->state);
1528
1529 /*
1530 * It is possible to really be running, i.e. we have re-loaded
1531 * a running card
1532 * Clear and reprime the interrupt source
1533 */
1534 fst_clear_intr(card);
1535 return IRQ_HANDLED;
1536 }
1537
1538 /* Clear and reprime the interrupt source */
1539 fst_clear_intr(card);
1540
1541 /*
1542 * Is the interrupt for this card (handshake == 1)
1543 */
1544 do_card_interrupt = 0;
1545 if (FST_RDB(card, interruptHandshake) == 1) {
1546 do_card_interrupt += FST_CARD_INT;
1547 /* Set the software acknowledge */
1548 FST_WRB(card, interruptHandshake, 0xEE);
1549 }
1550 if (card->family == FST_FAMILY_TXU) {
1551 /*
1552 * Is it a DMA Interrupt
1553 */
1554 dma_intcsr = inl(card->pci_conf + INTCSR_9054);
1555 if (dma_intcsr & 0x00200000) {
1556 /*
1557 * DMA Channel 0 (Rx transfer complete)
1558 */
1559 dbg(DBG_RX, "DMA Rx xfer complete\n");
1560 outb(0x8, card->pci_conf + DMACSR0);
1561 fst_rx_dma_complete(card, card->dma_port_rx,
1562 card->dma_len_rx, card->dma_skb_rx,
1563 card->dma_rxpos);
1564 card->dmarx_in_progress = 0;
1565 do_card_interrupt += FST_RX_DMA_INT;
1566 }
1567 if (dma_intcsr & 0x00400000) {
1568 /*
1569 * DMA Channel 1 (Tx transfer complete)
1570 */
1571 dbg(DBG_TX, "DMA Tx xfer complete\n");
1572 outb(0x8, card->pci_conf + DMACSR1);
1573 fst_tx_dma_complete(card, card->dma_port_tx,
1574 card->dma_len_tx, card->dma_txpos);
1575 card->dmatx_in_progress = 0;
1576 do_card_interrupt += FST_TX_DMA_INT;
1577 }
1578 }
1579
1580 /*
1581 * Have we been missing Interrupts
1582 */
1583 int_retry_count = FST_RDL(card, interruptRetryCount);
1584 if (int_retry_count) {
1585 dbg(DBG_ASS, "Card %d int_retry_count is %d\n",
1586 card->card_no, int_retry_count);
1587 FST_WRL(card, interruptRetryCount, 0);
1588 }
1589
1590 if (!do_card_interrupt) {
1591 return IRQ_HANDLED;
1592 }
1593
1594	/* Schedule the bottom half of the ISR */
1595 fst_q_work_item(&fst_work_intq, card->card_no);
1596 tasklet_schedule(&fst_int_task);
1597
1598 /* Drain the event queue */
1599 rdidx = FST_RDB(card, interruptEvent.rdindex) & 0x1f;
1600 wridx = FST_RDB(card, interruptEvent.wrindex) & 0x1f;
1601 while (rdidx != wridx) {
1602 event = FST_RDB(card, interruptEvent.evntbuff[rdidx]);
1603 port = &card->ports[event & 0x03];
1604
1605 dbg(DBG_INTR, "Processing Interrupt event: %x\n", event);
1606
1607 switch (event) {
1608 case TE1_ALMA:
1609 dbg(DBG_INTR, "TE1 Alarm intr\n");
1610 if (port->run)
1611 fst_intr_te1_alarm(card, port);
1612 break;
1613
1614 case CTLA_CHG:
1615 case CTLB_CHG:
1616 case CTLC_CHG:
1617 case CTLD_CHG:
1618 if (port->run)
1619 fst_intr_ctlchg(card, port);
1620 break;
1621
1622 case ABTA_SENT:
1623 case ABTB_SENT:
1624 case ABTC_SENT:
1625 case ABTD_SENT:
1626 dbg(DBG_TX, "Abort complete port %d\n", port->index);
1627 break;
1628
1629 case TXA_UNDF:
1630 case TXB_UNDF:
1631 case TXC_UNDF:
1632 case TXD_UNDF:
1633 /* Difficult to see how we'd get this given that we
1634 * always load up the entire packet for DMA.
1635 */
1636 dbg(DBG_TX, "Tx underflow port %d\n", port->index);
1637 hdlc_stats(port_to_dev(port))->tx_errors++;
1638 hdlc_stats(port_to_dev(port))->tx_fifo_errors++;
1639 dbg(DBG_ASS, "Tx underflow on card %d port %d\n",
1640 card->card_no, port->index);
1641 break;
1642
1643 case INIT_CPLT:
1644 dbg(DBG_INIT, "Card init OK intr\n");
1645 break;
1646
1647 case INIT_FAIL:
1648 dbg(DBG_INIT, "Card init FAILED intr\n");
1649 card->state = FST_IFAILED;
1650 break;
1651
1652 default:
1653 printk_err("intr: unknown card event %d. ignored\n",
1654 event);
1655 break;
1656 }
1657
1658 /* Bump and wrap the index */
1659 if (++rdidx >= MAX_CIRBUFF)
1660 rdidx = 0;
1661 }
1662 FST_WRB(card, interruptEvent.rdindex, rdidx);
1663 return IRQ_HANDLED;
1664}
1665
1666/* Check that the shared memory configuration is one that we can handle
1667 * and that some basic parameters are correct
1668 */
1669static void
1670check_started_ok(struct fst_card_info *card)
1671{
1672 int i;
1673
1674 /* Check structure version and end marker */
1675 if (FST_RDW(card, smcVersion) != SMC_VERSION) {
1676 printk_err("Bad shared memory version %d expected %d\n",
1677 FST_RDW(card, smcVersion), SMC_VERSION);
1678 card->state = FST_BADVERSION;
1679 return;
1680 }
1681 if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
1682 printk_err("Missing shared memory signature\n");
1683 card->state = FST_BADVERSION;
1684 return;
1685 }
1686 /* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
1687 if ((i = FST_RDB(card, taskStatus)) == 0x01) {
1688 card->state = FST_RUNNING;
1689 } else if (i == 0xFF) {
1690 printk_err("Firmware initialisation failed. Card halted\n");
1691 card->state = FST_HALTED;
1692 return;
1693 } else if (i != 0x00) {
1694 printk_err("Unknown firmware status 0x%x\n", i);
1695 card->state = FST_HALTED;
1696 return;
1697 }
1698
1699 /* Finally check the number of ports reported by firmware against the
1700 * number we assumed at card detection. Should never happen with
1701 * existing firmware etc so we just report it for the moment.
1702 */
1703 if (FST_RDL(card, numberOfPorts) != card->nports) {
1704 printk_warn("Port count mismatch on card %d."
1705 " Firmware thinks %d we say %d\n", card->card_no,
1706 FST_RDL(card, numberOfPorts), card->nports);
1707 }
1708}
1709
1710static int
1711set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
1712 struct fstioc_info *info)
1713{
1714 int err;
1715 unsigned char my_framing;
1716
1717 /* Set things according to the user set valid flags
1718 * Several of the old options have been invalidated/replaced by the
1719 * generic hdlc package.
1720 */
1721 err = 0;
1722 if (info->valid & FSTVAL_PROTO) {
1723 if (info->proto == FST_RAW)
1724 port->mode = FST_RAW;
1725 else
1726 port->mode = FST_GEN_HDLC;
1727 }
1728
1729 if (info->valid & FSTVAL_CABLE)
1730 err = -EINVAL;
1731
1732 if (info->valid & FSTVAL_SPEED)
1733 err = -EINVAL;
1734
1735 if (info->valid & FSTVAL_PHASE)
1736 FST_WRB(card, portConfig[port->index].invertClock,
1737 info->invertClock);
1738 if (info->valid & FSTVAL_MODE)
1739 FST_WRW(card, cardMode, info->cardMode);
1740 if (info->valid & FSTVAL_TE1) {
1741 FST_WRL(card, suConfig.dataRate, info->lineSpeed);
1742 FST_WRB(card, suConfig.clocking, info->clockSource);
1743 my_framing = FRAMING_E1;
1744 if (info->framing == E1)
1745 my_framing = FRAMING_E1;
1746 if (info->framing == T1)
1747 my_framing = FRAMING_T1;
1748 if (info->framing == J1)
1749 my_framing = FRAMING_J1;
1750 FST_WRB(card, suConfig.framing, my_framing);
1751 FST_WRB(card, suConfig.structure, info->structure);
1752 FST_WRB(card, suConfig.interface, info->interface);
1753 FST_WRB(card, suConfig.coding, info->coding);
1754 FST_WRB(card, suConfig.lineBuildOut, info->lineBuildOut);
1755 FST_WRB(card, suConfig.equalizer, info->equalizer);
1756 FST_WRB(card, suConfig.transparentMode, info->transparentMode);
1757 FST_WRB(card, suConfig.loopMode, info->loopMode);
1758 FST_WRB(card, suConfig.range, info->range);
1759 FST_WRB(card, suConfig.txBufferMode, info->txBufferMode);
1760 FST_WRB(card, suConfig.rxBufferMode, info->rxBufferMode);
1761 FST_WRB(card, suConfig.startingSlot, info->startingSlot);
1762 FST_WRB(card, suConfig.losThreshold, info->losThreshold);
1763 if (info->idleCode)
1764 FST_WRB(card, suConfig.enableIdleCode, 1);
1765 else
1766 FST_WRB(card, suConfig.enableIdleCode, 0);
1767 FST_WRB(card, suConfig.idleCode, info->idleCode);
1768#if FST_DEBUG
1769 if (info->valid & FSTVAL_TE1) {
1770 printk("Setting TE1 data\n");
1771 printk("Line Speed = %d\n", info->lineSpeed);
1772 printk("Start slot = %d\n", info->startingSlot);
1773 printk("Clock source = %d\n", info->clockSource);
1774 printk("Framing = %d\n", my_framing);
1775 printk("Structure = %d\n", info->structure);
1776 printk("interface = %d\n", info->interface);
1777 printk("Coding = %d\n", info->coding);
1778 printk("Line build out = %d\n", info->lineBuildOut);
1779 printk("Equaliser = %d\n", info->equalizer);
1780 printk("Transparent mode = %d\n",
1781 info->transparentMode);
1782 printk("Loop mode = %d\n", info->loopMode);
1783 printk("Range = %d\n", info->range);
1784 printk("Tx Buffer mode = %d\n", info->txBufferMode);
1785 printk("Rx Buffer mode = %d\n", info->rxBufferMode);
1786 printk("LOS Threshold = %d\n", info->losThreshold);
1787 printk("Idle Code = %d\n", info->idleCode);
1788 }
1789#endif
1790 }
1791#if FST_DEBUG
1792 if (info->valid & FSTVAL_DEBUG) {
1793 fst_debug_mask = info->debug;
1794 }
1795#endif
1796
1797 return err;
1798}
1799
1800static void
1801gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
1802 struct fstioc_info *info)
1803{
1804 int i;
1805
1806 memset(info, 0, sizeof (struct fstioc_info));
1807
1808 i = port->index;
1809 info->kernelVersion = LINUX_VERSION_CODE;
1810 info->nports = card->nports;
1811 info->type = card->type;
1812 info->state = card->state;
1813 info->proto = FST_GEN_HDLC;
1814 info->index = i;
1815#if FST_DEBUG
1816 info->debug = fst_debug_mask;
1817#endif
1818
1819 /* Only mark information as valid if card is running.
1820 * Copy the data anyway in case it is useful for diagnostics
1821 */
1822 info->valid = ((card->state == FST_RUNNING) ? FSTVAL_ALL : FSTVAL_CARD)
1823#if FST_DEBUG
1824 | FSTVAL_DEBUG
1825#endif
1826 ;
1827
1828 info->lineInterface = FST_RDW(card, portConfig[i].lineInterface);
1829 info->internalClock = FST_RDB(card, portConfig[i].internalClock);
1830 info->lineSpeed = FST_RDL(card, portConfig[i].lineSpeed);
1831 info->invertClock = FST_RDB(card, portConfig[i].invertClock);
1832 info->v24IpSts = FST_RDL(card, v24IpSts[i]);
1833 info->v24OpSts = FST_RDL(card, v24OpSts[i]);
1834 info->clockStatus = FST_RDW(card, clockStatus[i]);
1835 info->cableStatus = FST_RDW(card, cableStatus);
1836 info->cardMode = FST_RDW(card, cardMode);
1837 info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion);
1838
1839 /*
1840	 * The T2U can report cable presence for both ports A and B
1841 * in bits 0 and 1 of cableStatus. See which port we are and
1842 * do the mapping.
1843 */
1844 if (card->family == FST_FAMILY_TXU) {
1845 if (port->index == 0) {
1846 /*
1847 * Port A
1848 */
1849 info->cableStatus = info->cableStatus & 1;
1850 } else {
1851 /*
1852 * Port B
1853 */
1854 info->cableStatus = info->cableStatus >> 1;
1855 info->cableStatus = info->cableStatus & 1;
1856 }
1857 }
1858 /*
1859 * Some additional bits if we are TE1
1860 */
1861 if (card->type == FST_TYPE_TE1) {
1862 info->lineSpeed = FST_RDL(card, suConfig.dataRate);
1863 info->clockSource = FST_RDB(card, suConfig.clocking);
1864 info->framing = FST_RDB(card, suConfig.framing);
1865 info->structure = FST_RDB(card, suConfig.structure);
1866 info->interface = FST_RDB(card, suConfig.interface);
1867 info->coding = FST_RDB(card, suConfig.coding);
1868 info->lineBuildOut = FST_RDB(card, suConfig.lineBuildOut);
1869 info->equalizer = FST_RDB(card, suConfig.equalizer);
1870 info->loopMode = FST_RDB(card, suConfig.loopMode);
1871 info->range = FST_RDB(card, suConfig.range);
1872 info->txBufferMode = FST_RDB(card, suConfig.txBufferMode);
1873 info->rxBufferMode = FST_RDB(card, suConfig.rxBufferMode);
1874 info->startingSlot = FST_RDB(card, suConfig.startingSlot);
1875 info->losThreshold = FST_RDB(card, suConfig.losThreshold);
1876 if (FST_RDB(card, suConfig.enableIdleCode))
1877 info->idleCode = FST_RDB(card, suConfig.idleCode);
1878 else
1879 info->idleCode = 0;
1880 info->receiveBufferDelay =
1881 FST_RDL(card, suStatus.receiveBufferDelay);
1882 info->framingErrorCount =
1883 FST_RDL(card, suStatus.framingErrorCount);
1884 info->codeViolationCount =
1885 FST_RDL(card, suStatus.codeViolationCount);
1886 info->crcErrorCount = FST_RDL(card, suStatus.crcErrorCount);
1887 info->lineAttenuation = FST_RDL(card, suStatus.lineAttenuation);
1888 info->lossOfSignal = FST_RDB(card, suStatus.lossOfSignal);
1889 info->receiveRemoteAlarm =
1890 FST_RDB(card, suStatus.receiveRemoteAlarm);
1891 info->alarmIndicationSignal =
1892 FST_RDB(card, suStatus.alarmIndicationSignal);
1893 }
1894}
1895
1896static int
1897fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
1898 struct ifreq *ifr)
1899{
1900 sync_serial_settings sync;
1901 int i;
1902
1903 if (ifr->ifr_settings.size != sizeof (sync)) {
1904 return -ENOMEM;
1905 }
1906
1907 if (copy_from_user
1908 (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof (sync))) {
1909 return -EFAULT;
1910 }
1911
1912 if (sync.loopback)
1913 return -EINVAL;
1914
1915 i = port->index;
1916
1917 switch (ifr->ifr_settings.type) {
1918 case IF_IFACE_V35:
1919 FST_WRW(card, portConfig[i].lineInterface, V35);
1920 port->hwif = V35;
1921 break;
1922
1923 case IF_IFACE_V24:
1924 FST_WRW(card, portConfig[i].lineInterface, V24);
1925 port->hwif = V24;
1926 break;
1927
1928 case IF_IFACE_X21:
1929 FST_WRW(card, portConfig[i].lineInterface, X21);
1930 port->hwif = X21;
1931 break;
1932
1933 case IF_IFACE_X21D:
1934 FST_WRW(card, portConfig[i].lineInterface, X21D);
1935 port->hwif = X21D;
1936 break;
1937
1938 case IF_IFACE_T1:
1939 FST_WRW(card, portConfig[i].lineInterface, T1);
1940 port->hwif = T1;
1941 break;
1942
1943 case IF_IFACE_E1:
1944 FST_WRW(card, portConfig[i].lineInterface, E1);
1945 port->hwif = E1;
1946 break;
1947
1948 case IF_IFACE_SYNC_SERIAL:
1949 break;
1950
1951 default:
1952 return -EINVAL;
1953 }
1954
1955 switch (sync.clock_type) {
1956 case CLOCK_EXT:
1957 FST_WRB(card, portConfig[i].internalClock, EXTCLK);
1958 break;
1959
1960 case CLOCK_INT:
1961 FST_WRB(card, portConfig[i].internalClock, INTCLK);
1962 break;
1963
1964 default:
1965 return -EINVAL;
1966 }
1967 FST_WRL(card, portConfig[i].lineSpeed, sync.clock_rate);
1968 return 0;
1969}
1970
1971static int
1972fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
1973 struct ifreq *ifr)
1974{
1975 sync_serial_settings sync;
1976 int i;
1977
1978	/* First check what line type is set; we'll default to reporting X.21
1979	 * if nothing is set, as IF_IFACE_SYNC_SERIAL implies it can't be
1980 * changed
1981 */
1982 switch (port->hwif) {
1983 case E1:
1984 ifr->ifr_settings.type = IF_IFACE_E1;
1985 break;
1986 case T1:
1987 ifr->ifr_settings.type = IF_IFACE_T1;
1988 break;
1989 case V35:
1990 ifr->ifr_settings.type = IF_IFACE_V35;
1991 break;
1992 case V24:
1993 ifr->ifr_settings.type = IF_IFACE_V24;
1994 break;
1995 case X21D:
1996 ifr->ifr_settings.type = IF_IFACE_X21D;
1997 break;
1998 case X21:
1999 default:
2000 ifr->ifr_settings.type = IF_IFACE_X21;
2001 break;
2002 }
2003 if (ifr->ifr_settings.size == 0) {
2004 return 0; /* only type requested */
2005 }
2006 if (ifr->ifr_settings.size < sizeof (sync)) {
2007 return -ENOMEM;
2008 }
2009
2010 i = port->index;
2011 sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
2012	/* Luckily the card and Linux use the same encoding here */
2013 sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
2014 INTCLK ? CLOCK_INT : CLOCK_EXT;
2015 sync.loopback = 0;
2016
2017 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
2018 return -EFAULT;
2019 }
2020
2021 ifr->ifr_settings.size = sizeof (sync);
2022 return 0;
2023}
2024
2025static int
2026fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2027{
2028 struct fst_card_info *card;
2029 struct fst_port_info *port;
2030 struct fstioc_write wrthdr;
2031 struct fstioc_info info;
2032 unsigned long flags;
2033
2034 dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);
2035
2036 port = dev_to_port(dev);
2037 card = port->card;
2038
2039 if (!capable(CAP_NET_ADMIN))
2040 return -EPERM;
2041
2042 switch (cmd) {
2043 case FSTCPURESET:
2044 fst_cpureset(card);
2045 card->state = FST_RESET;
2046 return 0;
2047
2048 case FSTCPURELEASE:
2049 fst_cpurelease(card);
2050 card->state = FST_STARTING;
2051 return 0;
2052
2053 case FSTWRITE: /* Code write (download) */
2054
2055 /* First copy in the header with the length and offset of data
2056 * to write
2057 */
2058 if (ifr->ifr_data == NULL) {
2059 return -EINVAL;
2060 }
2061 if (copy_from_user(&wrthdr, ifr->ifr_data,
2062 sizeof (struct fstioc_write))) {
2063 return -EFAULT;
2064 }
2065
2066 /* Sanity check the parameters. We don't support partial writes
2067 * when going over the top
2068 */
2069 if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE
2070 || wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
2071 return -ENXIO;
2072 }
2073
2074 /* Now copy the data to the card.
2075 * This will probably break on some architectures.
2076 * I'll fix it when I have something to test on.
2077 */
2078 if (copy_from_user(card->mem + wrthdr.offset,
2079 ifr->ifr_data + sizeof (struct fstioc_write),
2080 wrthdr.size)) {
2081 return -EFAULT;
2082 }
2083
2084 /* Writes to the memory of a card in the reset state constitute
2085 * a download
2086 */
2087 if (card->state == FST_RESET) {
2088 card->state = FST_DOWNLOAD;
2089 }
2090 return 0;
2091
2092 case FSTGETCONF:
2093
2094 /* If card has just been started check the shared memory config
2095 * version and marker
2096 */
2097 if (card->state == FST_STARTING) {
2098 check_started_ok(card);
2099
2100 /* If everything checked out enable card interrupts */
2101 if (card->state == FST_RUNNING) {
2102 spin_lock_irqsave(&card->card_lock, flags);
2103 fst_enable_intr(card);
2104 FST_WRB(card, interruptHandshake, 0xEE);
2105 spin_unlock_irqrestore(&card->card_lock, flags);
2106 }
2107 }
2108
2109 if (ifr->ifr_data == NULL) {
2110 return -EINVAL;
2111 }
2112
2113 gather_conf_info(card, port, &info);
2114
2115 if (copy_to_user(ifr->ifr_data, &info, sizeof (info))) {
2116 return -EFAULT;
2117 }
2118 return 0;
2119
2120 case FSTSETCONF:
2121
2122 /*
2123		 * Most of the settings have been moved to the generic ioctls;
2124		 * this just covers debug and board ident now
2125 */
2126
2127 if (card->state != FST_RUNNING) {
2128 printk_err
2129 ("Attempt to configure card %d in non-running state (%d)\n",
2130 card->card_no, card->state);
2131 return -EIO;
2132 }
2133 if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
2134 return -EFAULT;
2135 }
2136
2137 return set_conf_from_info(card, port, &info);
2138
2139 case SIOCWANDEV:
2140 switch (ifr->ifr_settings.type) {
2141 case IF_GET_IFACE:
2142 return fst_get_iface(card, port, ifr);
2143
2144 case IF_IFACE_SYNC_SERIAL:
2145 case IF_IFACE_V35:
2146 case IF_IFACE_V24:
2147 case IF_IFACE_X21:
2148 case IF_IFACE_X21D:
2149 case IF_IFACE_T1:
2150 case IF_IFACE_E1:
2151 return fst_set_iface(card, port, ifr);
2152
2153 case IF_PROTO_RAW:
2154 port->mode = FST_RAW;
2155 return 0;
2156
2157 case IF_GET_PROTO:
2158 if (port->mode == FST_RAW) {
2159 ifr->ifr_settings.type = IF_PROTO_RAW;
2160 return 0;
2161 }
2162 return hdlc_ioctl(dev, ifr, cmd);
2163
2164 default:
2165 port->mode = FST_GEN_HDLC;
2166 dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
2167 ifr->ifr_settings.type);
2168 return hdlc_ioctl(dev, ifr, cmd);
2169 }
2170
2171 default:
2172 /* Not one of ours. Pass through to HDLC package */
2173 return hdlc_ioctl(dev, ifr, cmd);
2174 }
2175}
2176
2177static void
2178fst_openport(struct fst_port_info *port)
2179{
2180 int signals;
2181 int txq_length;
2182
2183 /* Only init things if card is actually running. This allows open to
2184 * succeed for downloads etc.
2185 */
2186 if (port->card->state == FST_RUNNING) {
2187 if (port->run) {
2188 dbg(DBG_OPEN, "open: found port already running\n");
2189
2190 fst_issue_cmd(port, STOPPORT);
2191 port->run = 0;
2192 }
2193
2194 fst_rx_config(port);
2195 fst_tx_config(port);
2196 fst_op_raise(port, OPSTS_RTS | OPSTS_DTR);
2197
2198 fst_issue_cmd(port, STARTPORT);
2199 port->run = 1;
2200
2201 signals = FST_RDL(port->card, v24DebouncedSts[port->index]);
2202 if (signals & (((port->hwif == X21) || (port->hwif == X21D))
2203 ? IPSTS_INDICATE : IPSTS_DCD))
2204 netif_carrier_on(port_to_dev(port));
2205 else
2206 netif_carrier_off(port_to_dev(port));
2207
2208 txq_length = port->txqe - port->txqs;
2209 port->txqe = 0;
2210 port->txqs = 0;
2211 }
2212
2213}
2214
2215static void
2216fst_closeport(struct fst_port_info *port)
2217{
2218 if (port->card->state == FST_RUNNING) {
2219 if (port->run) {
2220 port->run = 0;
2221 fst_op_lower(port, OPSTS_RTS | OPSTS_DTR);
2222
2223 fst_issue_cmd(port, STOPPORT);
2224 } else {
2225 dbg(DBG_OPEN, "close: port not running\n");
2226 }
2227 }
2228}
2229
2230static int
2231fst_open(struct net_device *dev)
2232{
2233 int err;
2234 struct fst_port_info *port;
2235
2236 port = dev_to_port(dev);
2237 if (!try_module_get(THIS_MODULE))
2238 return -EBUSY;
2239
2240 if (port->mode != FST_RAW) {
2241 err = hdlc_open(dev);
2242 if (err)
2243 return err;
2244 }
2245
2246 fst_openport(port);
2247 netif_wake_queue(dev);
2248 return 0;
2249}
2250
2251static int
2252fst_close(struct net_device *dev)
2253{
2254 struct fst_port_info *port;
2255 struct fst_card_info *card;
2256 unsigned char tx_dma_done;
2257 unsigned char rx_dma_done;
2258
2259 port = dev_to_port(dev);
2260 card = port->card;
2261
2262 tx_dma_done = inb(card->pci_conf + DMACSR1);
2263 rx_dma_done = inb(card->pci_conf + DMACSR0);
2264 dbg(DBG_OPEN,
2265 "Port Close: tx_dma_in_progress = %d (%x) rx_dma_in_progress = %d (%x)\n",
2266 card->dmatx_in_progress, tx_dma_done, card->dmarx_in_progress,
2267 rx_dma_done);
2268
2269 netif_stop_queue(dev);
2270 fst_closeport(dev_to_port(dev));
2271 if (port->mode != FST_RAW) {
2272 hdlc_close(dev);
2273 }
2274 module_put(THIS_MODULE);
2275 return 0;
2276}
2277
2278static int
2279fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity)
2280{
2281 /*
2282	 * The setting is currently fixed in the FarSync card so we check and forget
2283 */
2284 if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT)
2285 return -EINVAL;
2286 return 0;
2287}
2288
2289static void
2290fst_tx_timeout(struct net_device *dev)
2291{
2292 struct fst_port_info *port;
2293 struct fst_card_info *card;
2294 struct net_device_stats *stats = hdlc_stats(dev);
2295
2296 port = dev_to_port(dev);
2297 card = port->card;
2298 stats->tx_errors++;
2299 stats->tx_aborted_errors++;
2300 dbg(DBG_ASS, "Tx timeout card %d port %d\n",
2301 card->card_no, port->index);
2302 fst_issue_cmd(port, ABORTTX);
2303
2304 dev->trans_start = jiffies;
2305 netif_wake_queue(dev);
2306 port->start = 0;
2307}
2308
2309static int
2310fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
2311{
2312 struct fst_card_info *card;
2313 struct fst_port_info *port;
2314 struct net_device_stats *stats = hdlc_stats(dev);
2315 unsigned long flags;
2316 int txq_length;
2317
2318 port = dev_to_port(dev);
2319 card = port->card;
2320 dbg(DBG_TX, "fst_start_xmit: length = %d\n", skb->len);
2321
2322 /* Drop packet with error if we don't have carrier */
2323 if (!netif_carrier_ok(dev)) {
2324 dev_kfree_skb(skb);
2325 stats->tx_errors++;
2326 stats->tx_carrier_errors++;
2327 dbg(DBG_ASS,
2328 "Tried to transmit but no carrier on card %d port %d\n",
2329 card->card_no, port->index);
2330 return 0;
2331 }
2332
2333 /* Drop it if it's too big! MTU failure ? */
2334 if (skb->len > LEN_TX_BUFFER) {
2335 dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
2336 LEN_TX_BUFFER);
2337 dev_kfree_skb(skb);
2338 stats->tx_errors++;
2339 return 0;
2340 }
2341
2342 /*
2343 * We are always going to queue the packet
2344 * so that the bottom half is the only place we tx from
2345 * Check there is room in the port txq
2346 */
2347 spin_lock_irqsave(&card->card_lock, flags);
2348 if ((txq_length = port->txqe - port->txqs) < 0) {
2349 /*
2350 * This is the case where the next free has wrapped but the
2351 * last used hasn't
2352 */
2353 txq_length = txq_length + FST_TXQ_DEPTH;
2354 }
2355 spin_unlock_irqrestore(&card->card_lock, flags);
2356 if (txq_length > fst_txq_high) {
2357 /*
2358 * We have got enough buffers in the pipeline. Ask the network
2359 * layer to stop sending frames down
2360 */
2361 netif_stop_queue(dev);
2362 port->start = 1; /* I'm using this to signal stop sent up */
2363 }
2364
2365 if (txq_length == FST_TXQ_DEPTH - 1) {
2366 /*
2367 * This shouldn't have happened but such is life
2368 */
2369 dev_kfree_skb(skb);
2370 stats->tx_errors++;
2371 dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
2372 card->card_no, port->index);
2373 return 0;
2374 }
2375
2376 /*
2377 * queue the buffer
2378 */
2379 spin_lock_irqsave(&card->card_lock, flags);
2380 port->txq[port->txqe] = skb;
2381 port->txqe++;
2382 if (port->txqe == FST_TXQ_DEPTH)
2383 port->txqe = 0;
2384 spin_unlock_irqrestore(&card->card_lock, flags);
2385
2386	/* Schedule the bottom half which now does transmit processing */
2387 fst_q_work_item(&fst_work_txq, card->card_no);
2388 tasklet_schedule(&fst_tx_task);
2389
2390 return 0;
2391}
2392
2393/*
2394 * Card setup having checked hardware resources.
2395 * Should be pretty bizarre if we get an error here (kernel memory
2396 * exhaustion is one possibility). If we do see a problem we report it
2397 * via a printk and leave the corresponding interface and all that follow
2398 * disabled.
2399 */
2400static char *type_strings[] __devinitdata = {
2401 "no hardware", /* Should never be seen */
2402 "FarSync T2P",
2403 "FarSync T4P",
2404 "FarSync T1U",
2405 "FarSync T2U",
2406 "FarSync T4U",
2407 "FarSync TE1"
2408};
2409
2410static void __devinit
2411fst_init_card(struct fst_card_info *card)
2412{
2413 int i;
2414 int err;
2415
2416 /* We're working on a number of ports based on the card ID. If the
2417 * firmware detects something different later (should never happen)
2418 * we'll have to revise it in some way then.
2419 */
2420 for (i = 0; i < card->nports; i++) {
2421 err = register_hdlc_device(card->ports[i].dev);
2422 if (err < 0) {
2423 int j;
2424 printk_err ("Cannot register HDLC device for port %d"
2425 " (errno %d)\n", i, -err );
2426 for (j = i; j < card->nports; j++) {
2427 free_netdev(card->ports[j].dev);
2428 card->ports[j].dev = NULL;
2429 }
2430 card->nports = i;
2431 break;
2432 }
2433 }
2434
2435 printk_info("%s-%s: %s IRQ%d, %d ports\n",
2436 port_to_dev(&card->ports[0])->name,
2437 port_to_dev(&card->ports[card->nports - 1])->name,
2438 type_strings[card->type], card->irq, card->nports);
2439}
2440
2441/*
2442 * Initialise card when detected.
2443 * Returns 0 to indicate success, or errno otherwise.
2444 */
2445static int __devinit
2446fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2447{
2448 static int firsttime_done = 0;
2449 static int no_of_cards_added = 0;
2450 struct fst_card_info *card;
2451 int err = 0;
2452 int i;
2453
2454 if (!firsttime_done) {
2455 printk_info("FarSync WAN driver " FST_USER_VERSION
2456 " (c) 2001-2004 FarSite Communications Ltd.\n");
2457 firsttime_done = 1;
2458 dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
2459 }
2460
2461 /*
2462 * We are going to be clever and allow certain cards not to be
2463 * configured. An exclude list can be provided in /etc/modules.conf
2464 */
2465 if (fst_excluded_cards != 0) {
2466 /*
2467 * There are cards to exclude
2468 *
2469 */
2470 for (i = 0; i < fst_excluded_cards; i++) {
2471 if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
2472 printk_info("FarSync PCI device %d not assigned\n",
2473 (pdev->devfn) >> 3);
2474 return -EBUSY;
2475 }
2476 }
2477 }
2478
2479 /* Allocate driver private data */
2480 card = kmalloc(sizeof (struct fst_card_info), GFP_KERNEL);
2481 if (card == NULL) {
2482 printk_err("FarSync card found but insufficient memory for"
2483 " driver storage\n");
2484 return -ENOMEM;
2485 }
2486 memset(card, 0, sizeof (struct fst_card_info));
2487
2488 /* Try to enable the device */
2489 if ((err = pci_enable_device(pdev)) != 0) {
2490 printk_err("Failed to enable card. Err %d\n", -err);
2491 kfree(card);
2492 return err;
2493 }
2494
2495 if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
2496 printk_err("Failed to allocate regions. Err %d\n", -err);
2497 pci_disable_device(pdev);
2498 kfree(card);
2499 return err;
2500 }
2501
2502 /* Get virtual addresses of memory regions */
2503 card->pci_conf = pci_resource_start(pdev, 1);
2504 card->phys_mem = pci_resource_start(pdev, 2);
2505 card->phys_ctlmem = pci_resource_start(pdev, 3);
2506 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
2507 printk_err("Physical memory remap failed\n");
2508 pci_release_regions(pdev);
2509 pci_disable_device(pdev);
2510 kfree(card);
2511 return -ENODEV;
2512 }
2513 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
2514 printk_err("Control memory remap failed\n");
2515 pci_release_regions(pdev);
2516 pci_disable_device(pdev);
2517 kfree(card);
2518 return -ENODEV;
2519 }
2520 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
2521
2522 /* Register the interrupt handler */
2523 if (request_irq(pdev->irq, fst_intr, SA_SHIRQ, FST_DEV_NAME, card)) {
2524 printk_err("Unable to register interrupt %d\n", card->irq);
2525 pci_release_regions(pdev);
2526 pci_disable_device(pdev);
2527 iounmap(card->ctlmem);
2528 iounmap(card->mem);
2529 kfree(card);
2530 return -ENODEV;
2531 }
2532
2533 /* Record info we need */
2534 card->irq = pdev->irq;
2535 card->type = ent->driver_data;
2536 card->family = ((ent->driver_data == FST_TYPE_T2P) ||
2537 (ent->driver_data == FST_TYPE_T4P))
2538 ? FST_FAMILY_TXP : FST_FAMILY_TXU;
2539 if ((ent->driver_data == FST_TYPE_T1U) ||
2540 (ent->driver_data == FST_TYPE_TE1))
2541 card->nports = 1;
2542 else
2543 card->nports = ((ent->driver_data == FST_TYPE_T2P) ||
2544 (ent->driver_data == FST_TYPE_T2U)) ? 2 : 4;
2545
2546 card->state = FST_UNINIT;
2547 spin_lock_init ( &card->card_lock );
2548
2549 for ( i = 0 ; i < card->nports ; i++ ) {
2550 struct net_device *dev = alloc_hdlcdev(&card->ports[i]);
2551 hdlc_device *hdlc;
2552 if (!dev) {
2553 while (i--)
2554 free_netdev(card->ports[i].dev);
2555 printk_err ("FarSync: out of memory\n");
2556 free_irq(card->irq, card);
2557 pci_release_regions(pdev);
2558 pci_disable_device(pdev);
2559 iounmap(card->ctlmem);
2560 iounmap(card->mem);
2561 kfree(card);
2562 return -ENODEV;
2563 }
2564 card->ports[i].dev = dev;
2565 card->ports[i].card = card;
2566 card->ports[i].index = i;
2567 card->ports[i].run = 0;
2568
2569 hdlc = dev_to_hdlc(dev);
2570
2571 /* Fill in the net device info */
2572 /* Since this is a PCI setup this is purely
2573 * informational. Give them the buffer addresses
2574 * and basic card I/O.
2575 */
2576 dev->mem_start = card->phys_mem
2577 + BUF_OFFSET ( txBuffer[i][0][0]);
2578 dev->mem_end = card->phys_mem
2579 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
2580 dev->base_addr = card->pci_conf;
2581 dev->irq = card->irq;
2582
2583 dev->tx_queue_len = FST_TX_QUEUE_LEN;
2584 dev->open = fst_open;
2585 dev->stop = fst_close;
2586 dev->do_ioctl = fst_ioctl;
2587 dev->watchdog_timeo = FST_TX_TIMEOUT;
2588 dev->tx_timeout = fst_tx_timeout;
2589 hdlc->attach = fst_attach;
2590 hdlc->xmit = fst_start_xmit;
2591 }
2592
2593 card->device = pdev;
2594
2595 dbg(DBG_PCI, "type %d nports %d irq %d\n", card->type,
2596 card->nports, card->irq);
2597 dbg(DBG_PCI, "conf %04x mem %08x ctlmem %08x\n",
2598 card->pci_conf, card->phys_mem, card->phys_ctlmem);
2599
2600 /* Reset the card's processor */
2601 fst_cpureset(card);
2602 card->state = FST_RESET;
2603
2604 /* Initialise DMA (if required) */
2605 fst_init_dma(card);
2606
2607 /* Record driver data for later use */
2608 pci_set_drvdata(pdev, card);
2609
2610 /* Remainder of card setup */
2611 fst_card_array[no_of_cards_added] = card;
2612 card->card_no = no_of_cards_added++; /* Record instance and bump it */
2613 fst_init_card(card);
2614 if (card->family == FST_FAMILY_TXU) {
2615 /*
2616 * Allocate a dma buffer for transmit and receives
2617 */
2618 card->rx_dma_handle_host =
2619 pci_alloc_consistent(card->device, FST_MAX_MTU,
2620 &card->rx_dma_handle_card);
2621 if (card->rx_dma_handle_host == NULL) {
2622 printk_err("Could not allocate rx dma buffer\n");
2623 fst_disable_intr(card);
2624 pci_release_regions(pdev);
2625 pci_disable_device(pdev);
2626 iounmap(card->ctlmem);
2627 iounmap(card->mem);
2628 kfree(card);
2629 return -ENOMEM;
2630 }
2631 card->tx_dma_handle_host =
2632 pci_alloc_consistent(card->device, FST_MAX_MTU,
2633 &card->tx_dma_handle_card);
2634 if (card->tx_dma_handle_host == NULL) {
2635 printk_err("Could not allocate tx dma buffer\n");
2636 fst_disable_intr(card);
2637 pci_release_regions(pdev);
2638 pci_disable_device(pdev);
2639 iounmap(card->ctlmem);
2640 iounmap(card->mem);
2641 kfree(card);
2642 return -ENOMEM;
2643 }
2644 }
2645 return 0; /* Success */
2646}
2647
2648/*
2649 * Cleanup and close down a card
2650 */
2651static void __devexit
2652fst_remove_one(struct pci_dev *pdev)
2653{
2654 struct fst_card_info *card;
2655 int i;
2656
2657 card = pci_get_drvdata(pdev);
2658
2659 for (i = 0; i < card->nports; i++) {
2660 struct net_device *dev = port_to_dev(&card->ports[i]);
2661 unregister_hdlc_device(dev);
2662 }
2663
2664 fst_disable_intr(card);
2665 free_irq(card->irq, card);
2666
2667 iounmap(card->ctlmem);
2668 iounmap(card->mem);
2669 pci_release_regions(pdev);
2670 if (card->family == FST_FAMILY_TXU) {
2671 /*
2672 * Free dma buffers
2673 */
2674 pci_free_consistent(card->device, FST_MAX_MTU,
2675 card->rx_dma_handle_host,
2676 card->rx_dma_handle_card);
2677 pci_free_consistent(card->device, FST_MAX_MTU,
2678 card->tx_dma_handle_host,
2679 card->tx_dma_handle_card);
2680 }
2681 fst_card_array[card->card_no] = NULL;
2682}
2683
2684static struct pci_driver fst_driver = {
2685 .name = FST_NAME,
2686 .id_table = fst_pci_dev_id,
2687 .probe = fst_add_one,
2688 .remove = __devexit_p(fst_remove_one),
2689 .suspend = NULL,
2690 .resume = NULL,
2691};
2692
2693static int __init
2694fst_init(void)
2695{
2696 int i;
2697
2698 for (i = 0; i < FST_MAX_CARDS; i++)
2699 fst_card_array[i] = NULL;
2700 spin_lock_init(&fst_work_q_lock);
2701 return pci_module_init(&fst_driver);
2702}
2703
2704static void __exit
2705fst_cleanup_module(void)
2706{
2707 printk_info("FarSync WAN driver unloading\n");
2708 pci_unregister_driver(&fst_driver);
2709}
2710
2711module_init(fst_init);
2712module_exit(fst_cleanup_module);
diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h
new file mode 100644
index 000000000000..d871dafa87a1
--- /dev/null
+++ b/drivers/net/wan/farsync.h
@@ -0,0 +1,357 @@
1/*
2 * FarSync X21 driver for Linux
3 *
4 * Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
5 *
6 * Copyright (C) 2001 FarSite Communications Ltd.
7 * www.farsite.co.uk
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 * Author: R.J.Dunlop <bob.dunlop@farsite.co.uk>
15 *
16 * For the most part this file only contains structures and information
17 * that is visible to applications outside the driver. Shared memory
18 * layout etc is internal to the driver and described within farsync.c.
19 * Overlap exists in that the values used for some fields within the
20 * ioctl interface extend into the card's firmware interface, so values in
21 * this file may not be changed arbitrarily.
22 */
23
24/* What's in a name
25 *
26 * The project name for this driver is Oscar. The driver is intended to be
27 * used with the FarSite T-Series cards (T2P & T4P) running in the high
28 * speed frame shifter mode. This is sometimes referred to as X.21 mode
29 * which is a complete misnomer as the card continues to support V.24 and
30 * V.35 as well as X.21.
31 *
32 * A short common prefix is useful for routines within the driver to avoid
33 * conflict with other similar drivers and I have chosen to use "fst_" for this
34 * purpose (FarSite T-series).
35 *
36 * Finally the device driver needs a short network interface name. Since
37 * "hdlc" is already in use I've chosen the even less informative "sync"
38 * for the present.
39 */
40#define FST_NAME "fst" /* In debug/info etc */
41#define FST_NDEV_NAME "sync" /* For net interface */
42#define FST_DEV_NAME "farsync" /* For misc interfaces */
43
44
45/* User version number
46 *
47 * This version number is incremented with each official release of the
48 * package and is a simplified number for normal user reference.
49 * Individual files are tracked by the version control system and may
50 * have individual versions (or IDs) that move much faster than the
51 * release version as individual updates are tracked.
52 */
53#define FST_USER_VERSION "1.04"
54
55
56/* Ioctl call command values
57 *
58 * The first three private ioctls are used by the sync-PPP module;
59 * allowing a little room for expansion, we start our numbering at 10.
60 */
61#define FSTWRITE (SIOCDEVPRIVATE+10)
62#define FSTCPURESET (SIOCDEVPRIVATE+11)
63#define FSTCPURELEASE (SIOCDEVPRIVATE+12)
64#define FSTGETCONF (SIOCDEVPRIVATE+13)
65#define FSTSETCONF (SIOCDEVPRIVATE+14)
66
67
68/* FSTWRITE
69 *
70 * Used to write a block of data (firmware etc) before the card is running
71 */
72struct fstioc_write {
73 unsigned int size;
74 unsigned int offset;
75 unsigned char data[0];
76};
77
78
79/* FSTCPURESET and FSTCPURELEASE
80 *
81 * These take no additional data.
82 * FSTCPURESET forces the card's CPU into a reset state and holds it there.
83 * FSTCPURELEASE releases the CPU from this reset state, allowing it to run;
84 * the reset vector should be set up before this ioctl is run.
85 */
86
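/*
 * A minimal user-space sketch of the download sequence implied above:
 * hold the card's CPU in reset, push the firmware image down in FSTWRITE
 * blocks, then release the CPU.  The 1 KB chunk size, the starting card
 * offset of 0 and the use of ifr_data to carry the request are
 * illustrative assumptions only; fst_ioctl() in farsync.c defines the
 * authoritative interface.  "sock" is any datagram socket, e.g. from
 * socket(AF_INET, SOCK_DGRAM, 0), and this header must be included.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int fst_download(int sock, const char *ifname,
			const unsigned char *image, unsigned int len)
{
	unsigned char buf[sizeof(struct fstioc_write) + 1024];
	struct fstioc_write *wr = (struct fstioc_write *)buf;
	struct ifreq ifr;
	unsigned int off, chunk;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(sock, FSTCPURESET, &ifr) < 0)		/* hold CPU in reset */
		return -1;

	for (off = 0; off < len; off += chunk) {
		chunk = (len - off > 1024) ? 1024 : len - off;
		wr->size = chunk;
		wr->offset = off;		/* assumed offset in card memory */
		memcpy(wr->data, image + off, chunk);
		ifr.ifr_data = (char *)wr;
		if (ioctl(sock, FSTWRITE, &ifr) < 0)
			return -1;
	}

	return ioctl(sock, FSTCPURELEASE, &ifr);	/* let the processor run */
}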
87/* FSTGETCONF and FSTSETCONF
88 *
89 * Get and set a card/ports configuration.
90 * In order to allow selective setting of items and for the kernel to
91 * indicate a partial status response the first field "valid" is a bitmask
92 * indicating which other fields in the structure are valid.
93 * Many of the field names in this structure match those used in the
94 * firmware shared memory configuration interface and come originally from
95 * the NT header file Smc.h
96 *
97 * When used with FSTGETCONF this structure should be zeroed before use.
98 * This is to allow for possible future expansion when some of the fields
99 * might be used to indicate a different (expanded) structure.
100 */
101struct fstioc_info {
102 unsigned int valid; /* Bits of structure that are valid */
103 unsigned int nports; /* Number of serial ports */
104 unsigned int type; /* Type index of card */
105 unsigned int state; /* State of card */
106 unsigned int index; /* Index of port ioctl was issued on */
107 unsigned int smcFirmwareVersion;
108 unsigned long kernelVersion; /* What Kernel version we are working with */
109 unsigned short lineInterface; /* Physical interface type */
110 unsigned char proto; /* Line protocol */
111 unsigned char internalClock; /* 1 => internal clock, 0 => external */
112 unsigned int lineSpeed; /* Speed in bps */
113 unsigned int v24IpSts; /* V.24 control input status */
114 unsigned int v24OpSts; /* V.24 control output status */
115 unsigned short clockStatus; /* lsb: 0=> present, 1=> absent */
116 unsigned short cableStatus; /* lsb: 0=> present, 1=> absent */
117 unsigned short cardMode; /* lsb: LED id mode */
118 unsigned short debug; /* Debug flags */
119	unsigned char transparentMode;  /* Not used, always 0 */
120 unsigned char invertClock; /* Invert clock feature for syncing */
121 unsigned char startingSlot; /* Time slot to use for start of tx */
122 unsigned char clockSource; /* External or internal */
123 unsigned char framing; /* E1, T1 or J1 */
124 unsigned char structure; /* unframed, double, crc4, f4, f12, */
125 /* f24 f72 */
126 unsigned char interface; /* rj48c or bnc */
127 unsigned char coding; /* hdb3 b8zs */
128 unsigned char lineBuildOut; /* 0, -7.5, -15, -22 */
129	unsigned char equalizer;        /* short or long haul settings */
130 unsigned char loopMode; /* various loopbacks */
131 unsigned char range; /* cable lengths */
132 unsigned char txBufferMode; /* tx elastic buffer depth */
133 unsigned char rxBufferMode; /* rx elastic buffer depth */
134 unsigned char losThreshold; /* Attenuation on LOS signal */
135 unsigned char idleCode; /* Value to send as idle timeslot */
136	unsigned int  receiveBufferDelay; /* delay through rx buffer timeslots */
137 unsigned int framingErrorCount; /* framing errors */
138 unsigned int codeViolationCount; /* code violations */
139 unsigned int crcErrorCount; /* CRC errors */
140 int lineAttenuation; /* in dB*/
141 unsigned short lossOfSignal;
142 unsigned short receiveRemoteAlarm;
143 unsigned short alarmIndicationSignal;
144};
145
146/* "valid" bitmask */
147#define FSTVAL_NONE 0x00000000 /* Nothing valid (firmware not running).
148 * Slight misnomer. In fact nports,
149 * type, state and index will be set
150 * based on hardware detected.
151 */
152#define FSTVAL_OMODEM 0x0000001F /* First 5 bits correspond to the
153 * output status bits defined for
154 * v24OpSts
155 */
156#define FSTVAL_SPEED 0x00000020 /* internalClock, lineSpeed, clockStatus
157 */
158#define FSTVAL_CABLE 0x00000040 /* lineInterface, cableStatus */
159#define FSTVAL_IMODEM 0x00000080 /* v24IpSts */
160#define FSTVAL_CARD 0x00000100 /* nports, type, state, index,
161 * smcFirmwareVersion
162 */
163#define FSTVAL_PROTO 0x00000200 /* proto */
164#define FSTVAL_MODE 0x00000400 /* cardMode */
165#define FSTVAL_PHASE 0x00000800 /* Clock phase */
166#define FSTVAL_TE1 0x00001000 /* T1E1 Configuration */
167#define FSTVAL_DEBUG 0x80000000 /* debug */
168#define FSTVAL_ALL 0x00001FFF /* Note: does not include DEBUG flag */
169
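/*
 * Minimal sketch of reading a port's configuration with FSTGETCONF.
 * The structure is zeroed first, as required above, and the "valid"
 * bitmask is checked before any field is trusted.  Passing the buffer
 * through ifr_data is an assumption here; fst_ioctl() in farsync.c is
 * the authoritative reference for the calling convention.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int fst_show_conf(int sock, const char *ifname)
{
	struct fstioc_info info;
	struct ifreq ifr;

	memset(&info, 0, sizeof(info));		/* zero before FSTGETCONF */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&info;

	if (ioctl(sock, FSTGETCONF, &ifr) < 0)
		return -1;

	if (info.valid & FSTVAL_CARD)
		printf("%s: card type %u, %u port(s)\n",
		       ifname, info.type, info.nports);
	if (info.valid & FSTVAL_SPEED)
		printf("%s: %u bps, %s clock\n", ifname, info.lineSpeed,
		       info.internalClock ? "internal" : "external");
	return 0;
}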
170/* "type" */
171#define FST_TYPE_NONE 0 /* Probably should never happen */
172#define FST_TYPE_T2P 1 /* T2P X21 2 port card */
173#define FST_TYPE_T4P 2 /* T4P X21 4 port card */
174#define FST_TYPE_T1U 3 /* T1U X21 1 port card */
175#define FST_TYPE_T2U 4 /* T2U X21 2 port card */
176#define FST_TYPE_T4U 5 /* T4U X21 4 port card */
177#define FST_TYPE_TE1 6 /* T1E1 X21 1 port card */
178
179/* "family" */
180#define FST_FAMILY_TXP 0 /* T2P or T4P */
181#define FST_FAMILY_TXU 1 /* T1U or T2U or T4U */
182
183/* "state" */
184#define FST_UNINIT 0 /* Raw uninitialised state following
185 * system startup */
186#define FST_RESET 1 /* Processor held in reset state */
187#define FST_DOWNLOAD 2 /* Card being downloaded */
188#define FST_STARTING 3 /* Released following download */
189#define FST_RUNNING 4 /* Processor running */
190#define FST_BADVERSION 5 /* Bad shared memory version detected */
191#define FST_HALTED 6 /* Processor flagged a halt */
192#define FST_IFAILED 7 /* Firmware issued initialisation failed
193 * interrupt
194 */
195/* "lineInterface" */
196#define V24 1
197#define X21 2
198#define V35 3
199#define X21D 4
200#define T1 5
201#define E1 6
202#define J1 7
203
204/* "proto" */
205#define FST_HDLC 1 /* Cisco compatible HDLC */
206#define FST_PPP 2 /* Sync PPP */
207#define FST_MONITOR 3 /* Monitor only (raw packet reception) */
208#define FST_RAW 4 /* Two way raw packets */
209#define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */
210
211/* "internalClock" */
212#define INTCLK 1
213#define EXTCLK 0
214
215/* "v24IpSts" bitmask */
216#define IPSTS_CTS 0x00000001 /* Clear To Send (Indicate for X.21) */
217#define IPSTS_INDICATE IPSTS_CTS
218#define IPSTS_DSR 0x00000002 /* Data Set Ready (T2P Port A) */
219#define IPSTS_DCD 0x00000004 /* Data Carrier Detect */
220#define IPSTS_RI 0x00000008 /* Ring Indicator (T2P Port A) */
221#define IPSTS_TMI 0x00000010 /* Test Mode Indicator (Not Supported)*/
222
223/* "v24OpSts" bitmask */
224#define OPSTS_RTS 0x00000001 /* Request To Send (Control for X.21) */
225#define OPSTS_CONTROL OPSTS_RTS
226#define OPSTS_DTR 0x00000002 /* Data Terminal Ready */
227#define OPSTS_DSRS 0x00000004 /* Data Signalling Rate Select (Not
228 * Supported) */
229#define OPSTS_SS 0x00000008 /* Select Standby (Not Supported) */
230#define OPSTS_LL 0x00000010 /* Maintenance Test (Not Supported) */
231
232/* "cardMode" bitmask */
233#define CARD_MODE_IDENTIFY 0x0001
234
235/*
236 * Constants for T1/E1 configuration
237 */
238
239/*
240 * Clock source
241 */
242#define CLOCKING_SLAVE 0
243#define CLOCKING_MASTER 1
244
245/*
246 * Framing
247 */
248#define FRAMING_E1 0
249#define FRAMING_J1 1
250#define FRAMING_T1 2
251
252/*
253 * Structure
254 */
255#define STRUCTURE_UNFRAMED 0
256#define STRUCTURE_E1_DOUBLE 1
257#define STRUCTURE_E1_CRC4 2
258#define STRUCTURE_E1_CRC4M 3
259#define STRUCTURE_T1_4 4
260#define STRUCTURE_T1_12 5
261#define STRUCTURE_T1_24 6
262#define STRUCTURE_T1_72 7
263
264/*
265 * Interface
266 */
267#define INTERFACE_RJ48C 0
268#define INTERFACE_BNC 1
269
270/*
271 * Coding
272 */
273
274#define CODING_HDB3 0
275#define CODING_NRZ 1
276#define CODING_CMI 2
277#define CODING_CMI_HDB3 3
278#define CODING_CMI_B8ZS 4
279#define CODING_AMI 5
280#define CODING_AMI_ZCS 6
281#define CODING_B8ZS 7
282
283/*
284 * Line Build Out
285 */
286#define LBO_0dB 0
287#define LBO_7dB5 1
288#define LBO_15dB 2
289#define LBO_22dB5 3
290
291/*
292 * Range for long haul T1 > 655 ft
293 */
294#define RANGE_0_133_FT 0
295#define RANGE_0_40_M RANGE_0_133_FT
296#define RANGE_133_266_FT 1
297#define RANGE_40_81_M RANGE_133_266_FT
298#define RANGE_266_399_FT 2
299#define RANGE_81_122_M RANGE_266_399_FT
300#define RANGE_399_533_FT 3
301#define RANGE_122_162_M RANGE_399_533_FT
302#define RANGE_533_655_FT 4
303#define RANGE_162_200_M RANGE_533_655_FT
304/*
305 * Receive Equaliser
306 */
307#define EQUALIZER_SHORT 0
308#define EQUALIZER_LONG 1
309
310/*
311 * Loop modes
312 */
313#define LOOP_NONE 0
314#define LOOP_LOCAL 1
315#define LOOP_PAYLOAD_EXC_TS0 2
316#define LOOP_PAYLOAD_INC_TS0 3
317#define LOOP_REMOTE 4
318
319/*
320 * Buffer modes
321 */
322#define BUFFER_2_FRAME 0
323#define BUFFER_1_FRAME 1
324#define BUFFER_96_BIT 2
325#define BUFFER_NONE 3
326
327/* Debug support
328 *
329 * These should only be enabled for development kernels; production code
330 * should define FST_DEBUG=0 in order to exclude the code.
331 * Setting FST_DEBUG=1 will include all the debug code but in a disabled
332 * state; use the FSTSETCONF ioctl to enable specific debug actions, or
333 * FST_DEBUG can be set to prime the debug selection.
334 */
335#define FST_DEBUG 0x0000
336#if FST_DEBUG
337
338extern int fst_debug_mask; /* Bit mask of actions to debug, bits
339 * listed below. Note: Bit 0 is used
340 * to trigger the inclusion of this
341 * code, without enabling any actions.
342 */
343#define DBG_INIT 0x0002 /* Card detection and initialisation */
344#define DBG_OPEN 0x0004 /* Open and close sequences */
345#define DBG_PCI 0x0008 /* PCI config operations */
346#define DBG_IOCTL 0x0010 /* Ioctls and other config */
347#define DBG_INTR 0x0020 /* Interrupt routines (be careful) */
348#define DBG_TX 0x0040 /* Packet transmission */
349#define DBG_RX 0x0080 /* Packet reception */
350#define DBG_CMD 0x0100 /* Port command issuing */
351
352#define DBG_ASS 0xFFFF /* Assert like statements. Code that
353 * should never be reached, if you see
354 * one of these then I've been an ass
355 */
356#endif /* FST_DEBUG */
357
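/*
 * Illustration only: a hypothetical trace helper gated by fst_debug_mask,
 * showing how the DBG_* bits above are meant to select debug actions at
 * run time (the mask being primed by FST_DEBUG or changed via the
 * FSTSETCONF ioctl).  The driver's real debug macros live in farsync.c.
 */
#if FST_DEBUG
#define fst_dbg(bits, fmt, args...)					\
	do {								\
		if (fst_debug_mask & (bits))				\
			printk(KERN_DEBUG FST_NAME ": " fmt, ## args);	\
	} while (0)
#else
#define fst_dbg(bits, fmt, args...) do { } while (0)
#endif

/* e.g. fst_dbg(DBG_TX, "port %d: queued %d byte frame\n", i, skb->len); */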
diff --git a/drivers/net/wan/hd64570.h b/drivers/net/wan/hd64570.h
new file mode 100644
index 000000000000..3839662ff201
--- /dev/null
+++ b/drivers/net/wan/hd64570.h
@@ -0,0 +1,241 @@
1#ifndef __HD64570_H
2#define __HD64570_H
3
4/* SCA HD64570 register definitions - all addresses for mode 0 (8086 MPU)
5 and 1 (64180 MPU). For modes 2 and 3, XOR the address with 0x01.
6
7 Source: HD64570 SCA User's Manual
8*/
9
10
11
12/* SCA Control Registers */
13#define LPR 0x00 /* Low Power */
14
15/* Wait controller registers */
16#define PABR0 0x02 /* Physical Address Boundary 0 */
17#define PABR1 0x03 /* Physical Address Boundary 1 */
18#define WCRL 0x04 /* Wait Control L */
19#define WCRM 0x05 /* Wait Control M */
20#define WCRH 0x06 /* Wait Control H */
21
22#define PCR 0x08 /* DMA Priority Control */
23#define DMER 0x09 /* DMA Master Enable */
24
25
26/* Interrupt registers */
27#define ISR0 0x10 /* Interrupt Status 0 */
28#define ISR1 0x11 /* Interrupt Status 1 */
29#define ISR2 0x12 /* Interrupt Status 2 */
30
31#define IER0 0x14 /* Interrupt Enable 0 */
32#define IER1 0x15 /* Interrupt Enable 1 */
33#define IER2 0x16 /* Interrupt Enable 2 */
34
35#define ITCR 0x18 /* Interrupt Control */
36#define IVR 0x1A /* Interrupt Vector */
37#define IMVR 0x1C /* Interrupt Modified Vector */
38
39
40
41/* MSCI channel (port) 0 registers - offset 0x20
42 MSCI channel (port) 1 registers - offset 0x40 */
43
44#define MSCI0_OFFSET 0x20
45#define MSCI1_OFFSET 0x40
46
47#define TRBL 0x00 /* TX/RX buffer L */
48#define TRBH 0x01 /* TX/RX buffer H */
49#define ST0 0x02 /* Status 0 */
50#define ST1 0x03 /* Status 1 */
51#define ST2 0x04 /* Status 2 */
52#define ST3 0x05 /* Status 3 */
53#define FST 0x06 /* Frame Status */
54#define IE0 0x08 /* Interrupt Enable 0 */
55#define IE1 0x09 /* Interrupt Enable 1 */
56#define IE2 0x0A /* Interrupt Enable 2 */
57#define FIE 0x0B /* Frame Interrupt Enable */
58#define CMD 0x0C /* Command */
59#define MD0 0x0E /* Mode 0 */
60#define MD1 0x0F /* Mode 1 */
61#define MD2 0x10 /* Mode 2 */
62#define CTL 0x11 /* Control */
63#define SA0 0x12 /* Sync/Address 0 */
64#define SA1 0x13 /* Sync/Address 1 */
65#define IDL 0x14 /* Idle Pattern */
66#define TMC 0x15 /* Time Constant */
67#define RXS 0x16 /* RX Clock Source */
68#define TXS 0x17 /* TX Clock Source */
69#define TRC0 0x18 /* TX Ready Control 0 */
70#define TRC1 0x19 /* TX Ready Control 1 */
71#define RRC 0x1A /* RX Ready Control */
72#define CST0 0x1C /* Current Status 0 */
73#define CST1 0x1D /* Current Status 1 */
74
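/*
 * Worked illustration of the addressing scheme (sketch only): a channel's
 * MSCI register lives at the channel offset plus the register index, and,
 * per the note at the top of this file, the address is XORed with 0x01
 * for CPU modes 2 and 3.  Real drivers wrap this in their own accessors,
 * e.g. the sca_in()/sca_out() helpers used by hd6457x.c.
 */
static inline unsigned int msci_reg(unsigned int reg, int channel, int mode23)
{
	unsigned int addr = (channel ? MSCI1_OFFSET : MSCI0_OFFSET) + reg;

	return mode23 ? (addr ^ 0x01) : addr;
}

/* e.g. msci_reg(MD2, 1, 0) == 0x40 + 0x10 == 0x50 */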
75
76/* Timer channel 0 (port 0 RX) registers - offset 0x60
77 Timer channel 1 (port 0 TX) registers - offset 0x68
78 Timer channel 2 (port 1 RX) registers - offset 0x70
79 Timer channel 3 (port 1 TX) registers - offset 0x78
80*/
81
82#define TIMER0RX_OFFSET 0x60
83#define TIMER0TX_OFFSET 0x68
84#define TIMER1RX_OFFSET 0x70
85#define TIMER1TX_OFFSET 0x78
86
87#define TCNTL 0x00 /* Up-counter L */
88#define TCNTH 0x01 /* Up-counter H */
89#define TCONRL 0x02 /* Constant L */
90#define TCONRH 0x03 /* Constant H */
91#define TCSR 0x04 /* Control/Status */
92#define TEPR 0x05 /* Expand Prescale */
93
94
95
96/* DMA channel 0 (port 0 RX) registers - offset 0x80
97 DMA channel 1 (port 0 TX) registers - offset 0xA0
98 DMA channel 2 (port 1 RX) registers - offset 0xC0
99 DMA channel 3 (port 1 TX) registers - offset 0xE0
100*/
101
102#define DMAC0RX_OFFSET 0x80
103#define DMAC0TX_OFFSET 0xA0
104#define DMAC1RX_OFFSET 0xC0
105#define DMAC1TX_OFFSET 0xE0
106
107#define BARL 0x00 /* Buffer Address L (chained block) */
108#define BARH 0x01 /* Buffer Address H (chained block) */
109#define BARB 0x02 /* Buffer Address B (chained block) */
110
111#define DARL 0x00 /* RX Destination Addr L (single block) */
112#define DARH 0x01 /* RX Destination Addr H (single block) */
113#define DARB 0x02 /* RX Destination Addr B (single block) */
114
115#define SARL 0x04 /* TX Source Address L (single block) */
116#define SARH 0x05 /* TX Source Address H (single block) */
117#define SARB 0x06 /* TX Source Address B (single block) */
118
119#define CPB 0x06 /* Chain Pointer Base (chained block) */
120
121#define CDAL 0x08 /* Current Descriptor Addr L (chained block) */
122#define CDAH 0x09 /* Current Descriptor Addr H (chained block) */
123#define EDAL 0x0A /* Error Descriptor Addr L (chained block) */
124#define EDAH 0x0B /* Error Descriptor Addr H (chained block) */
125#define BFLL 0x0C /* RX Receive Buffer Length L (chained block)*/
126#define BFLH 0x0D /* RX Receive Buffer Length H (chained block)*/
127#define BCRL 0x0E /* Byte Count L */
128#define BCRH 0x0F /* Byte Count H */
129#define DSR 0x10 /* DMA Status */
130#define DSR_RX(node) (DSR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
131#define DSR_TX(node) (DSR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
132#define DMR 0x11 /* DMA Mode */
133#define DMR_RX(node) (DMR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
134#define DMR_TX(node) (DMR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
135#define FCT 0x13 /* Frame End Interrupt Counter */
136#define FCT_RX(node) (FCT + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
137#define FCT_TX(node) (FCT + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
138#define DIR 0x14 /* DMA Interrupt Enable */
139#define DIR_RX(node) (DIR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
140#define DIR_TX(node) (DIR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
141#define DCR 0x15 /* DMA Command */
142#define DCR_RX(node) (DCR + (node ? DMAC1RX_OFFSET : DMAC0RX_OFFSET))
143#define DCR_TX(node) (DCR + (node ? DMAC1TX_OFFSET : DMAC0TX_OFFSET))
144
145
146
147
148/* Descriptor Structure */
149
150typedef struct {
151 u16 cp; /* Chain Pointer */
152 u32 bp; /* Buffer Pointer (24 bits) */
153 u16 len; /* Data Length */
154 u8 stat; /* Status */
155 u8 unused; /* pads to 2-byte boundary */
156}__attribute__ ((packed)) pkt_desc;
157
158
159/* Packet Descriptor Status bits */
160
161#define ST_TX_EOM 0x80 /* End of frame */
162#define ST_TX_EOT	0x01		/* End of transmission */
163
164#define ST_RX_EOM 0x80 /* End of frame */
165#define ST_RX_SHORT 0x40 /* Short frame */
166#define ST_RX_ABORT 0x20 /* Abort */
167#define ST_RX_RESBIT 0x10 /* Residual bit */
168#define ST_RX_OVERRUN 0x08 /* Overrun */
169#define ST_RX_CRC 0x04 /* CRC */
170
171#define ST_ERROR_MASK 0x7C
172
173#define DIR_EOTE 0x80 /* Transfer completed */
174#define DIR_EOME 0x40 /* Frame Transfer Completed (chained-block) */
175#define DIR_BOFE 0x20 /* Buffer Overflow/Underflow (chained-block)*/
176#define DIR_COFE 0x10 /* Counter Overflow (chained-block) */
177
178
179#define DSR_EOT 0x80 /* Transfer completed */
180#define DSR_EOM 0x40 /* Frame Transfer Completed (chained-block) */
181#define DSR_BOF 0x20 /* Buffer Overflow/Underflow (chained-block)*/
182#define DSR_COF 0x10 /* Counter Overflow (chained-block) */
183#define DSR_DE 0x02 /* DMA Enable */
184#define DSR_DWE 0x01 /* DMA Write Disable */
185
186/* DMA Master Enable Register (DMER) bits */
187#define DMER_DME 0x80 /* DMA Master Enable */
188
189
190#define CMD_RESET 0x21 /* Reset Channel */
191#define CMD_TX_ENABLE 0x02 /* Start transmitter */
192#define CMD_RX_ENABLE 0x12 /* Start receiver */
193
194#define MD0_HDLC 0x80 /* Bit-sync HDLC mode */
195#define MD0_CRC_ENA 0x04 /* Enable CRC code calculation */
196#define MD0_CRC_CCITT 0x02 /* CCITT CRC instead of CRC-16 */
197#define MD0_CRC_PR1 0x01 /* Initial all-ones instead of all-zeros */
198
199#define MD0_CRC_NONE 0x00
200#define MD0_CRC_16_0 0x04
201#define MD0_CRC_16 0x05
202#define MD0_CRC_ITU_0 0x06
203#define MD0_CRC_ITU 0x07
204
205#define MD2_NRZ 0x00
206#define MD2_NRZI 0x20
207#define MD2_MANCHESTER 0x80
208#define MD2_FM_MARK 0xA0
209#define MD2_FM_SPACE 0xC0
210#define MD2_LOOPBACK 0x03 /* Local data Loopback */
211
212#define CTL_NORTS 0x01
213#define CTL_IDLE 0x10 /* Transmit an idle pattern */
214#define CTL_UDRNC	0x20		/* Idle after CRC or FCS+flag transmission */
215
216#define ST0_TXRDY 0x02 /* TX ready */
217#define ST0_RXRDY 0x01 /* RX ready */
218
219#define ST1_UDRN 0x80 /* MSCI TX underrun */
220#define ST1_CDCD 0x04 /* DCD level changed */
221
222#define ST3_CTS 0x08 /* modem input - /CTS */
223#define ST3_DCD 0x04 /* modem input - /DCD */
224
225#define IE0_TXINT 0x80 /* TX INT MSCI interrupt enable */
226#define IE0_RXINTA 0x40 /* RX INT A MSCI interrupt enable */
227#define IE1_UDRN 0x80 /* TX underrun MSCI interrupt enable */
228#define IE1_CDCD 0x04 /* DCD level changed */
229
230#define DCR_ABORT 0x01 /* Software abort command */
231#define DCR_CLEAR_EOF 0x02 /* Clear EOF interrupt */
232
233/* TX and RX Clock Source - RXS and TXS */
234#define CLK_BRG_MASK 0x0F
235#define CLK_LINE_RX 0x00 /* TX/RX clock line input */
236#define CLK_LINE_TX 0x00 /* TX/RX line input */
237#define CLK_BRG_RX 0x40 /* internal baud rate generator */
238#define CLK_BRG_TX 0x40 /* internal baud rate generator */
239#define CLK_RXCLK_TX 0x60 /* TX clock from RX clock */
240
241#endif
diff --git a/drivers/net/wan/hd64572.h b/drivers/net/wan/hd64572.h
new file mode 100644
index 000000000000..96567c2dc4db
--- /dev/null
+++ b/drivers/net/wan/hd64572.h
@@ -0,0 +1,527 @@
1/*
2 * hd64572.h Description of the Hitachi HD64572 (SCA-II), valid for
3 * CPU modes 0 & 2.
4 *
5 * Author: Ivan Passos <ivan@cyclades.com>
6 *
7 * Copyright: (c) 2000-2001 Cyclades Corp.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 * $Log: hd64572.h,v $
15 * Revision 3.1 2001/06/15 12:41:10 regina
16 * upping major version number
17 *
18 * Revision 1.1.1.1 2001/06/13 20:24:49 daniela
19 * PC300 initial CVS version (3.4.0-pre1)
20 *
21 * Revision 1.0 2000/01/25 ivan
22 * Initial version.
23 *
24 */
25
26#ifndef __HD64572_H
27#define __HD64572_H
28
29/* Illegal Access Register */
30#define ILAR 0x00
31
32/* Wait Controller Registers */
33#define PABR0L 0x20 /* Physical Addr Boundary Register 0 L */
34#define PABR0H 0x21 /* Physical Addr Boundary Register 0 H */
35#define PABR1L 0x22 /* Physical Addr Boundary Register 1 L */
36#define PABR1H 0x23 /* Physical Addr Boundary Register 1 H */
37#define WCRL 0x24 /* Wait Control Register L */
38#define WCRM 0x25 /* Wait Control Register M */
39#define WCRH 0x26 /* Wait Control Register H */
40
41/* Interrupt Registers */
42#define IVR 0x60 /* Interrupt Vector Register */
43#define IMVR 0x64 /* Interrupt Modified Vector Register */
44#define ITCR 0x68 /* Interrupt Control Register */
45#define ISR0 0x6c /* Interrupt Status Register 0 */
46#define ISR1 0x70 /* Interrupt Status Register 1 */
47#define IER0 0x74 /* Interrupt Enable Register 0 */
48#define IER1 0x78 /* Interrupt Enable Register 1 */
49
50/* Register Access Macros (chan is 0 or 1 in _any_ case) */
51#define M_REG(reg, chan) (reg + 0x80*chan) /* MSCI */
52#define DRX_REG(reg, chan) (reg + 0x40*chan) /* DMA Rx */
53#define DTX_REG(reg, chan) (reg + 0x20*(2*chan + 1)) /* DMA Tx */
54#define TRX_REG(reg, chan) (reg + 0x20*chan) /* Timer Rx */
55#define TTX_REG(reg, chan) (reg + 0x10*(2*chan + 1)) /* Timer Tx */
56#define ST_REG(reg, chan) (reg + 0x80*chan) /* Status Cnt */
57#define IR0_DRX(val, chan) ((val)<<(8*(chan))) /* Int DMA Rx */
58#define IR0_DTX(val, chan) ((val)<<(4*(2*chan + 1))) /* Int DMA Tx */
59#define IR0_M(val, chan) ((val)<<(8*(chan))) /* Int MSCI */
60
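/*
 * Worked expansion of the access macros above for chan = 1 (the values
 * follow directly from the definitions; register indices are defined
 * further down in this file):
 *
 *	M_REG(MD0, 1)	= MD0 + 0x80		(MSCI channel 1)
 *	DRX_REG(reg, 1)	= reg + 0x40		(DMA RX channel 1)
 *	DTX_REG(reg, 1)	= reg + 0x20 * 3	(DMA TX channel 1)
 *	TTX_REG(reg, 1)	= reg + 0x10 * 3	(Timer TX channel 1)
 *	IR0_DTX(val, 1)	= val << 12		(DMA TX interrupt bits)
 */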
61/* MSCI Channel Registers */
62#define MSCI0_OFFSET 0x00
63#define MSCI1_OFFSET 0x80
64
65#define MD0 0x138 /* Mode reg 0 */
66#define MD1 0x139 /* Mode reg 1 */
67#define MD2 0x13a /* Mode reg 2 */
68#define MD3 0x13b /* Mode reg 3 */
69#define CTL 0x130 /* Control reg */
70#define RXS 0x13c /* RX clock source */
71#define TXS 0x13d /* TX clock source */
72#define EXS 0x13e /* External clock input selection */
73#define TMCT 0x144 /* Time constant (Tx) */
74#define TMCR 0x145 /* Time constant (Rx) */
75#define CMD 0x128 /* Command reg */
76#define ST0 0x118 /* Status reg 0 */
77#define ST1 0x119 /* Status reg 1 */
78#define ST2 0x11a /* Status reg 2 */
79#define ST3 0x11b /* Status reg 3 */
80#define ST4 0x11c /* Status reg 4 */
81#define FST 0x11d /* frame Status reg */
82#define IE0 0x120 /* Interrupt enable reg 0 */
83#define IE1 0x121 /* Interrupt enable reg 1 */
84#define IE2 0x122 /* Interrupt enable reg 2 */
85#define IE4 0x124 /* Interrupt enable reg 4 */
86#define FIE 0x125 /* Frame Interrupt enable reg */
87#define SA0 0x140 /* Syn Address reg 0 */
88#define SA1 0x141 /* Syn Address reg 1 */
89#define IDL 0x142 /* Idle register */
90#define TRBL 0x100 /* TX/RX buffer reg L */
91#define TRBK 0x101 /* TX/RX buffer reg K */
92#define TRBJ 0x102 /* TX/RX buffer reg J */
93#define TRBH 0x103 /* TX/RX buffer reg H */
94#define TRC0 0x148 /* TX Ready control reg 0 */
95#define TRC1 0x149 /* TX Ready control reg 1 */
96#define RRC 0x14a /* RX Ready control reg */
97#define CST0 0x108 /* Current Status Register 0 */
98#define CST1 0x109 /* Current Status Register 1 */
99#define CST2 0x10a /* Current Status Register 2 */
100#define CST3 0x10b /* Current Status Register 3 */
101#define GPO 0x131 /* General Purpose Output Pin Ctl Reg */
102#define TFS 0x14b /* Tx Start Threshold Ctl Reg */
103#define TFN 0x143 /* Inter-transmit-frame Time Fill Ctl Reg */
104#define TBN 0x110 /* Tx Buffer Number Reg */
105#define RBN 0x111 /* Rx Buffer Number Reg */
106#define TNR0 0x150 /* Tx DMA Request Ctl Reg 0 */
107#define TNR1 0x151 /* Tx DMA Request Ctl Reg 1 */
108#define TCR 0x152 /* Tx DMA Critical Request Reg */
109#define RNR 0x154 /* Rx DMA Request Ctl Reg */
110#define RCR 0x156 /* Rx DMA Critical Request Reg */
111
112/* Timer Registers */
113#define TIMER0RX_OFFSET 0x00
114#define TIMER0TX_OFFSET 0x10
115#define TIMER1RX_OFFSET 0x20
116#define TIMER1TX_OFFSET 0x30
117
118#define TCNTL 0x200 /* Timer Upcounter L */
119#define TCNTH 0x201 /* Timer Upcounter H */
120#define TCONRL 0x204 /* Timer Constant Register L */
121#define TCONRH 0x205 /* Timer Constant Register H */
122#define TCSR 0x206 /* Timer Control/Status Register */
123#define TEPR 0x207 /* Timer Expand Prescale Register */
124
125/* DMA registers */
126#define PCR 0x40 /* DMA priority control reg */
127#define DRR 0x44 /* DMA reset reg */
128#define DMER 0x07 /* DMA Master Enable reg */
129#define BTCR 0x08 /* Burst Tx Ctl Reg */
130#define BOLR 0x0c /* Back-off Length Reg */
131#define DSR_RX(chan) (0x48 + 2*chan) /* DMA Status Reg (Rx) */
132#define DSR_TX(chan) (0x49 + 2*chan) /* DMA Status Reg (Tx) */
133#define DIR_RX(chan) (0x4c + 2*chan) /* DMA Interrupt Enable Reg (Rx) */
134#define DIR_TX(chan) (0x4d + 2*chan) /* DMA Interrupt Enable Reg (Tx) */
135#define FCT_RX(chan) (0x50 + 2*chan) /* Frame End Interrupt Counter (Rx) */
136#define FCT_TX(chan) (0x51 + 2*chan) /* Frame End Interrupt Counter (Tx) */
137#define DMR_RX(chan) (0x54 + 2*chan) /* DMA Mode Reg (Rx) */
138#define DMR_TX(chan) (0x55 + 2*chan) /* DMA Mode Reg (Tx) */
139#define DCR_RX(chan) (0x58 + 2*chan) /* DMA Command Reg (Rx) */
140#define DCR_TX(chan) (0x59 + 2*chan) /* DMA Command Reg (Tx) */
141
142/* DMA Channel Registers */
143#define DMAC0RX_OFFSET 0x00
144#define DMAC0TX_OFFSET 0x20
145#define DMAC1RX_OFFSET 0x40
146#define DMAC1TX_OFFSET 0x60
147
148#define DARL 0x80 /* Dest Addr Register L (single-block, RX only) */
149#define DARH 0x81 /* Dest Addr Register H (single-block, RX only) */
150#define DARB 0x82 /* Dest Addr Register B (single-block, RX only) */
151#define DARBH 0x83 /* Dest Addr Register BH (single-block, RX only) */
152#define SARL 0x80 /* Source Addr Register L (single-block, TX only) */
153#define SARH 0x81 /* Source Addr Register H (single-block, TX only) */
154#define SARB 0x82 /* Source Addr Register B (single-block, TX only) */
155#define SARBH	0x83	/* Source Addr Register BH (single-block, TX only) */
156#define BARL 0x80 /* Buffer Addr Register L (chained-block) */
157#define BARH 0x81 /* Buffer Addr Register H (chained-block) */
158#define BARB 0x82 /* Buffer Addr Register B (chained-block) */
159#define BARBH 0x83 /* Buffer Addr Register BH (chained-block) */
160#define CDAL 0x84 /* Current Descriptor Addr Register L */
161#define CDAH 0x85 /* Current Descriptor Addr Register H */
162#define CDAB 0x86 /* Current Descriptor Addr Register B */
163#define CDABH 0x87 /* Current Descriptor Addr Register BH */
164#define EDAL 0x88 /* Error Descriptor Addr Register L */
165#define EDAH 0x89 /* Error Descriptor Addr Register H */
166#define EDAB 0x8a /* Error Descriptor Addr Register B */
167#define EDABH 0x8b /* Error Descriptor Addr Register BH */
168#define BFLL 0x90 /* RX Buffer Length L (only RX) */
169#define BFLH 0x91 /* RX Buffer Length H (only RX) */
170#define BCRL 0x8c /* Byte Count Register L */
171#define BCRH 0x8d /* Byte Count Register H */
172
173/* Block Descriptor Structure */
174typedef struct {
175 unsigned long next; /* pointer to next block descriptor */
176 unsigned long ptbuf; /* buffer pointer */
177 unsigned short len; /* data length */
178 unsigned char status; /* status */
179 unsigned char filler[5]; /* alignment filler (16 bytes) */
180} pcsca_bd_t;
181
182/* Block Descriptor Structure */
183typedef struct {
184 u32 cp; /* pointer to next block descriptor */
185 u32 bp; /* buffer pointer */
186 u16 len; /* data length */
187 u8 stat; /* status */
188 u8 unused; /* pads to 4-byte boundary */
189}pkt_desc;
190
191
192/*
193 Descriptor Status definitions:
194
195 Bit Transmission Reception
196
197 7 EOM EOM
198 6 - Short Frame
199 5 - Abort
200 4 - Residual bit
201 3 Underrun Overrun
202 2 - CRC
203 1 Ownership Ownership
204 0 EOT -
205*/
206#define DST_EOT 0x01 /* End of transmit command */
207#define DST_OSB 0x02 /* Ownership bit */
208#define DST_CRC 0x04 /* CRC Error */
209#define DST_OVR 0x08 /* Overrun */
210#define DST_UDR 0x08 /* Underrun */
211#define DST_RBIT 0x10 /* Residual bit */
212#define DST_ABT 0x20 /* Abort */
213#define DST_SHRT 0x40 /* Short Frame */
214#define DST_EOM 0x80 /* End of Message */
215
216/* Packet Descriptor Status bits */
217
218#define ST_TX_EOM 0x80 /* End of frame */
219#define ST_TX_UNDRRUN 0x08
220#define ST_TX_OWNRSHP 0x02
221#define ST_TX_EOT	0x01		/* End of transmission */
222
223#define ST_RX_EOM 0x80 /* End of frame */
224#define ST_RX_SHORT 0x40 /* Short frame */
225#define ST_RX_ABORT 0x20 /* Abort */
226#define ST_RX_RESBIT 0x10 /* Residual bit */
227#define ST_RX_OVERRUN 0x08 /* Overrun */
228#define ST_RX_CRC 0x04 /* CRC */
229#define ST_RX_OWNRSHP 0x02
230
231#define ST_ERROR_MASK 0x7C
232
233/* Status Counter Registers */
234#define CMCR 0x158 /* Counter Master Ctl Reg */
235#define TECNTL 0x160 /* Tx EOM Counter L */
236#define TECNTM 0x161 /* Tx EOM Counter M */
237#define TECNTH 0x162 /* Tx EOM Counter H */
238#define TECCR 0x163 /* Tx EOM Counter Ctl Reg */
239#define URCNTL 0x164 /* Underrun Counter L */
240#define URCNTH 0x165 /* Underrun Counter H */
241#define URCCR 0x167 /* Underrun Counter Ctl Reg */
242#define RECNTL 0x168 /* Rx EOM Counter L */
243#define RECNTM 0x169 /* Rx EOM Counter M */
244#define RECNTH 0x16a /* Rx EOM Counter H */
245#define RECCR 0x16b /* Rx EOM Counter Ctl Reg */
246#define ORCNTL 0x16c /* Overrun Counter L */
247#define ORCNTH 0x16d /* Overrun Counter H */
248#define ORCCR 0x16f /* Overrun Counter Ctl Reg */
249#define CECNTL 0x170 /* CRC Counter L */
250#define CECNTH 0x171 /* CRC Counter H */
251#define CECCR 0x173 /* CRC Counter Ctl Reg */
252#define ABCNTL 0x174 /* Abort frame Counter L */
253#define ABCNTH 0x175 /* Abort frame Counter H */
254#define ABCCR 0x177 /* Abort frame Counter Ctl Reg */
255#define SHCNTL 0x178 /* Short frame Counter L */
256#define SHCNTH 0x179 /* Short frame Counter H */
257#define SHCCR 0x17b /* Short frame Counter Ctl Reg */
258#define RSCNTL 0x17c /* Residual bit Counter L */
259#define RSCNTH 0x17d /* Residual bit Counter H */
260#define RSCCR 0x17f /* Residual bit Counter Ctl Reg */
261
262/* Register Programming Constants */
263
264#define IR0_DMIC 0x00000001
265#define IR0_DMIB 0x00000002
266#define IR0_DMIA 0x00000004
267#define IR0_EFT 0x00000008
268#define IR0_DMAREQ 0x00010000
269#define IR0_TXINT 0x00020000
270#define IR0_RXINTB 0x00040000
271#define IR0_RXINTA 0x00080000
272#define IR0_TXRDY 0x00100000
273#define IR0_RXRDY 0x00200000
274
275#define MD0_CRC16_0 0x00
276#define MD0_CRC16_1 0x01
277#define MD0_CRC32 0x02
278#define MD0_CRC_CCITT 0x03
279#define MD0_CRCC0 0x04
280#define MD0_CRCC1 0x08
281#define MD0_AUTO_ENA 0x10
282#define MD0_ASYNC 0x00
283#define MD0_BY_MSYNC 0x20
284#define MD0_BY_BISYNC 0x40
285#define MD0_BY_EXT 0x60
286#define MD0_BIT_SYNC 0x80
287#define MD0_TRANSP 0xc0
288
289#define MD0_HDLC 0x80 /* Bit-sync HDLC mode */
290
291#define MD0_CRC_NONE 0x00
292#define MD0_CRC_16_0 0x04
293#define MD0_CRC_16 0x05
294#define MD0_CRC_ITU32 0x06
295#define MD0_CRC_ITU 0x07
296
297#define MD1_NOADDR 0x00
298#define MD1_SADDR1 0x40
299#define MD1_SADDR2 0x80
300#define MD1_DADDR 0xc0
301
302#define MD2_NRZI_IEEE 0x40
303#define MD2_MANCHESTER 0x80
304#define MD2_FM_MARK 0xA0
305#define MD2_FM_SPACE 0xC0
306#define MD2_LOOPBACK 0x03 /* Local data Loopback */
307
308#define MD2_F_DUPLEX 0x00
309#define MD2_AUTO_ECHO 0x01
310#define MD2_LOOP_HI_Z 0x02
311#define MD2_LOOP_MIR 0x03
312#define MD2_ADPLL_X8 0x00
313#define MD2_ADPLL_X16 0x08
314#define MD2_ADPLL_X32 0x10
315#define MD2_NRZ 0x00
316#define MD2_NRZI 0x20
317#define MD2_NRZ_IEEE 0x40
318#define MD2_MANCH 0x00
319#define MD2_FM1 0x20
320#define MD2_FM0 0x40
321#define MD2_FM 0x80
322
323#define CTL_RTS 0x01
324#define CTL_DTR 0x02
325#define CTL_SYN 0x04
326#define CTL_IDLC 0x10
327#define CTL_UDRNC 0x20
328#define CTL_URSKP 0x40
329#define CTL_URCT 0x80
330
331#define CTL_NORTS 0x01
332#define CTL_NODTR 0x02
333#define CTL_IDLE 0x10
334
335#define RXS_BR0 0x01
336#define RXS_BR1 0x02
337#define RXS_BR2 0x04
338#define RXS_BR3 0x08
339#define RXS_ECLK 0x00
340#define RXS_ECLK_NS 0x20
341#define RXS_IBRG 0x40
342#define RXS_PLL1 0x50
343#define RXS_PLL2 0x60
344#define RXS_PLL3 0x70
345#define RXS_DRTXC 0x80
346
347#define TXS_BR0 0x01
348#define TXS_BR1 0x02
349#define TXS_BR2 0x04
350#define TXS_BR3 0x08
351#define TXS_ECLK 0x00
352#define TXS_IBRG 0x40
353#define TXS_RCLK 0x60
354#define TXS_DTRXC 0x80
355
356#define EXS_RES0 0x01
357#define EXS_RES1 0x02
358#define EXS_RES2 0x04
359#define EXS_TES0 0x10
360#define EXS_TES1 0x20
361#define EXS_TES2 0x40
362
363#define CLK_BRG_MASK 0x0F
364#define CLK_PIN_OUT 0x80
365#define CLK_LINE 0x00 /* clock line input */
366#define CLK_BRG 0x40 /* internal baud rate generator */
367#define CLK_TX_RXCLK 0x60 /* TX clock from RX clock */
368
369#define CMD_RX_RST 0x11
370#define CMD_RX_ENA 0x12
371#define CMD_RX_DIS 0x13
372#define CMD_RX_CRC_INIT 0x14
373#define CMD_RX_MSG_REJ 0x15
374#define CMD_RX_MP_SRCH 0x16
375#define CMD_RX_CRC_EXC 0x17
376#define CMD_RX_CRC_FRC 0x18
377#define CMD_TX_RST 0x01
378#define CMD_TX_ENA 0x02
379#define CMD_TX_DISA 0x03
380#define CMD_TX_CRC_INIT 0x04
381#define CMD_TX_CRC_EXC 0x05
382#define CMD_TX_EOM 0x06
383#define CMD_TX_ABORT 0x07
384#define CMD_TX_MP_ON 0x08
385#define CMD_TX_BUF_CLR 0x09
386#define CMD_TX_DISB 0x0b
387#define CMD_CH_RST 0x21
388#define CMD_SRCH_MODE 0x31
389#define CMD_NOP 0x00
390
391#define CMD_RESET 0x21
392#define CMD_TX_ENABLE 0x02
393#define CMD_RX_ENABLE 0x12
394
395#define ST0_RXRDY 0x01
396#define ST0_TXRDY 0x02
397#define ST0_RXINTB 0x20
398#define ST0_RXINTA 0x40
399#define ST0_TXINT 0x80
400
401#define ST1_IDLE 0x01
402#define ST1_ABORT 0x02
403#define ST1_CDCD 0x04
404#define ST1_CCTS 0x08
405#define ST1_SYN_FLAG 0x10
406#define ST1_CLMD 0x20
407#define ST1_TXIDLE 0x40
408#define ST1_UDRN 0x80
409
410#define ST2_CRCE 0x04
411#define ST2_ONRN 0x08
412#define ST2_RBIT 0x10
413#define ST2_ABORT 0x20
414#define ST2_SHORT 0x40
415#define ST2_EOM 0x80
416
417#define ST3_RX_ENA 0x01
418#define ST3_TX_ENA 0x02
419#define ST3_DCD 0x04
420#define ST3_CTS 0x08
421#define ST3_SRCH_MODE 0x10
422#define ST3_SLOOP 0x20
423#define ST3_GPI 0x80
424
425#define ST4_RDNR 0x01
426#define ST4_RDCR 0x02
427#define ST4_TDNR 0x04
428#define ST4_TDCR 0x08
429#define ST4_OCLM 0x20
430#define ST4_CFT 0x40
431#define ST4_CGPI 0x80
432
433#define FST_CRCEF 0x04
434#define FST_OVRNF 0x08
435#define FST_RBIF 0x10
436#define FST_ABTF 0x20
437#define FST_SHRTF 0x40
438#define FST_EOMF 0x80
439
440#define IE0_RXRDY 0x01
441#define IE0_TXRDY 0x02
442#define IE0_RXINTB 0x20
443#define IE0_RXINTA 0x40
444#define IE0_TXINT 0x80
445#define IE0_UDRN 0x00008000 /* TX underrun MSCI interrupt enable */
446#define IE0_CDCD 0x00000400 /* CD level change interrupt enable */
447
448#define IE1_IDLD 0x01
449#define IE1_ABTD 0x02
450#define IE1_CDCD 0x04
451#define IE1_CCTS 0x08
452#define IE1_SYNCD 0x10
453#define IE1_CLMD 0x20
454#define IE1_IDL 0x40
455#define IE1_UDRN 0x80
456
457#define IE2_CRCE 0x04
458#define IE2_OVRN 0x08
459#define IE2_RBIT 0x10
460#define IE2_ABT 0x20
461#define IE2_SHRT 0x40
462#define IE2_EOM 0x80
463
464#define IE4_RDNR 0x01
465#define IE4_RDCR 0x02
466#define IE4_TDNR 0x04
467#define IE4_TDCR 0x08
468#define IE4_OCLM 0x20
469#define IE4_CFT 0x40
470#define IE4_CGPI 0x80
471
472#define FIE_CRCEF 0x04
473#define FIE_OVRNF 0x08
474#define FIE_RBIF 0x10
475#define FIE_ABTF 0x20
476#define FIE_SHRTF 0x40
477#define FIE_EOMF 0x80
478
479#define DSR_DWE 0x01
480#define DSR_DE 0x02
481#define DSR_REF 0x04
482#define DSR_UDRF 0x04
483#define DSR_COA 0x08
484#define DSR_COF 0x10
485#define DSR_BOF 0x20
486#define DSR_EOM 0x40
487#define DSR_EOT 0x80
488
489#define DIR_REF 0x04
490#define DIR_UDRF 0x04
491#define DIR_COA 0x08
492#define DIR_COF 0x10
493#define DIR_BOF 0x20
494#define DIR_EOM 0x40
495#define DIR_EOT 0x80
496
497#define DIR_REFE 0x04
498#define DIR_UDRFE 0x04
499#define DIR_COAE 0x08
500#define DIR_COFE 0x10
501#define DIR_BOFE 0x20
502#define DIR_EOME 0x40
503#define DIR_EOTE 0x80
504
505#define DMR_CNTE 0x02
506#define DMR_NF 0x04
507#define DMR_SEOME 0x08
508#define DMR_TMOD 0x10
509
510#define DMER_DME 0x80 /* DMA Master Enable */
511
512#define DCR_SW_ABT 0x01
513#define DCR_FCT_CLR 0x02
514
515#define DCR_ABORT 0x01
516#define DCR_CLEAR_EOF 0x02
517
518#define PCR_COTE 0x80
519#define PCR_PR0 0x01
520#define PCR_PR1 0x02
521#define PCR_PR2 0x04
522#define PCR_CCC 0x08
523#define PCR_BRC 0x10
524#define PCR_OSB 0x40
525#define PCR_BURST 0x80
526
527#endif /* (__HD64572_H) */
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd6457x.c
new file mode 100644
index 000000000000..d3743321a977
--- /dev/null
+++ b/drivers/net/wan/hd6457x.c
@@ -0,0 +1,853 @@
1/*
2 * Hitachi SCA HD64570 and HD64572 common driver for Linux
3 *
4 * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * Sources of information:
11 * Hitachi HD64570 SCA User's Manual
12 * Hitachi HD64572 SCA-II User's Manual
13 *
14 * We use the following SCA memory map:
15 *
16 * Packet buffer descriptor rings - starting from winbase or win0base:
17 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
18 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
19 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
20 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
21 *
22 * Packet data buffers - starting from winbase + buff_offset:
23 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
24 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
25 * rx_ring_buffers * HDLC_MAX_MRU     = logical channel #1 RX buffers (if used)
26 * tx_ring_buffers * HDLC_MAX_MRU     = logical channel #1 TX buffers (if used)
27 */
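/*
 * Worked example of the layout above, assuming (for illustration only)
 * 16 RX and 16 TX ring buffers per channel and the 10-byte HD64570
 * descriptor; the real ring sizes and buff_offset come from the card
 * driver that includes this file.
 *
 *	channel #0 RX ring: descriptors  0..15, offsets   0..159
 *	channel #0 TX ring: descriptors 16..31, offsets 160..319
 *	channel #1 RX ring: descriptors 32..47, offsets 320..479
 *	channel #1 TX ring: descriptors 48..63, offsets 480..639
 *
 * Packet buffers start at buff_offset and occupy one HDLC_MAX_MRU-sized
 * slot per descriptor in the same order, so the buffer for channel #1
 * RX descriptor 5 sits at buff_offset + 37 * HDLC_MAX_MRU (compare
 * desc_abs_number() and buffer_offset() below).
 */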
28
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/slab.h>
32#include <linux/jiffies.h>
33#include <linux/types.h>
34#include <linux/fcntl.h>
35#include <linux/interrupt.h>
36#include <linux/in.h>
37#include <linux/string.h>
38#include <linux/errno.h>
39#include <linux/init.h>
40#include <linux/ioport.h>
41#include <linux/bitops.h>
42
43#include <asm/system.h>
44#include <asm/uaccess.h>
45#include <asm/io.h>
46
47#include <linux/netdevice.h>
48#include <linux/skbuff.h>
49
50#include <linux/hdlc.h>
51
52#if (!defined (__HD64570_H) && !defined (__HD64572_H)) || \
53 (defined (__HD64570_H) && defined (__HD64572_H))
54#error Either hd64570.h or hd64572.h must be included
55#endif
56
57#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
58#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
59#define get_dmac_tx(port) (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
60
61#define SCA_INTR_MSCI(node) (node ? 0x10 : 0x01)
62#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
63#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
64
65#ifdef __HD64570_H /* HD64570 */
66#define sca_outa(value, reg, card) sca_outw(value, reg, card)
67#define sca_ina(reg, card) sca_inw(reg, card)
68#define writea(value, ptr) writew(value, ptr)
69
70#else /* HD64572 */
71#define sca_outa(value, reg, card) sca_outl(value, reg, card)
72#define sca_ina(reg, card) sca_inl(reg, card)
73#define writea(value, ptr) writel(value, ptr)
74#endif
75
76static inline struct net_device *port_to_dev(port_t *port)
77{
78 return port->dev;
79}
80
81static inline int sca_intr_status(card_t *card)
82{
83 u8 result = 0;
84
85#ifdef __HD64570_H /* HD64570 */
86 u8 isr0 = sca_in(ISR0, card);
87 u8 isr1 = sca_in(ISR1, card);
88
89 if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
90 if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
91 if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
92 if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
93 if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
94 if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
95
96#else /* HD64572 */
97 u32 isr0 = sca_inl(ISR0, card);
98
99 if (isr0 & 0x0000000F) result |= SCA_INTR_DMAC_RX(0);
100 if (isr0 & 0x000000F0) result |= SCA_INTR_DMAC_TX(0);
101 if (isr0 & 0x00000F00) result |= SCA_INTR_DMAC_RX(1);
102 if (isr0 & 0x0000F000) result |= SCA_INTR_DMAC_TX(1);
103 if (isr0 & 0x003E0000) result |= SCA_INTR_MSCI(0);
104 if (isr0 & 0x3E000000) result |= SCA_INTR_MSCI(1);
105
106#endif /* HD64570 vs HD64572 */
107
108 if (!(result & SCA_INTR_DMAC_TX(0)))
109 if (sca_in(DSR_TX(0), card) & DSR_EOM)
110 result |= SCA_INTR_DMAC_TX(0);
111 if (!(result & SCA_INTR_DMAC_TX(1)))
112 if (sca_in(DSR_TX(1), card) & DSR_EOM)
113 result |= SCA_INTR_DMAC_TX(1);
114
115 return result;
116}
117
118static inline port_t* dev_to_port(struct net_device *dev)
119{
120 return dev_to_hdlc(dev)->priv;
121}
122
123static inline u16 next_desc(port_t *port, u16 desc, int transmit)
124{
125 return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
126 : port_to_card(port)->rx_ring_buffers);
127}
128
129
130
131static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
132{
133 u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
134 u16 tx_buffs = port_to_card(port)->tx_ring_buffers;
135
136 desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
137 return log_node(port) * (rx_buffs + tx_buffs) +
138 transmit * rx_buffs + desc;
139}
140
141
142
143static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
144{
145	/* Descriptor offset always fits in 16 bits */
146 return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
147}
148
149
150
151static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc, int transmit)
152{
153#ifdef PAGE0_ALWAYS_MAPPED
154 return (pkt_desc __iomem *)(win0base(port_to_card(port))
155 + desc_offset(port, desc, transmit));
156#else
157 return (pkt_desc __iomem *)(winbase(port_to_card(port))
158 + desc_offset(port, desc, transmit));
159#endif
160}
161
162
163
164static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
165{
166 return port_to_card(port)->buff_offset +
167 desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
168}
169
170
171
172static void sca_init_sync_port(port_t *port)
173{
174 card_t *card = port_to_card(port);
175 int transmit, i;
176
177 port->rxin = 0;
178 port->txin = 0;
179 port->txlast = 0;
180
181#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
182 openwin(card, 0);
183#endif
184
185 for (transmit = 0; transmit < 2; transmit++) {
186 u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
187 u16 buffs = transmit ? card->tx_ring_buffers
188 : card->rx_ring_buffers;
189
190 for (i = 0; i < buffs; i++) {
191 pkt_desc __iomem *desc = desc_address(port, i, transmit);
192 u16 chain_off = desc_offset(port, i + 1, transmit);
193 u32 buff_off = buffer_offset(port, i, transmit);
194
195 writea(chain_off, &desc->cp);
196 writel(buff_off, &desc->bp);
197 writew(0, &desc->len);
198 writeb(0, &desc->stat);
199 }
200
201 /* DMA disable - to halt state */
202 sca_out(0, transmit ? DSR_TX(phy_node(port)) :
203 DSR_RX(phy_node(port)), card);
204 /* software ABORT - to initial state */
205 sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
206 DCR_RX(phy_node(port)), card);
207
208#ifdef __HD64570_H
209 sca_out(0, dmac + CPB, card); /* pointer base */
210#endif
211 /* current desc addr */
212 sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card);
213 if (!transmit)
214 sca_outa(desc_offset(port, buffs - 1, transmit),
215 dmac + EDAL, card);
216 else
217 sca_outa(desc_offset(port, 0, transmit), dmac + EDAL,
218 card);
219
220 /* clear frame end interrupt counter */
221 sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
222 DCR_RX(phy_node(port)), card);
223
224 if (!transmit) { /* Receive */
225 /* set buffer length */
226 sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
227 /* Chain mode, Multi-frame */
228 sca_out(0x14, DMR_RX(phy_node(port)), card);
229 sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
230 card);
231 /* DMA enable */
232 sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
233 } else { /* Transmit */
234 /* Chain mode, Multi-frame */
235 sca_out(0x14, DMR_TX(phy_node(port)), card);
236 /* enable underflow interrupts */
237 sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
238 }
239 }
240
241 hdlc_set_carrier(!(sca_in(get_msci(port) + ST3, card) & ST3_DCD),
242 port_to_dev(port));
243}
244
245
246
247#ifdef NEED_SCA_MSCI_INTR
248/* MSCI interrupt service */
249static inline void sca_msci_intr(port_t *port)
250{
251 u16 msci = get_msci(port);
252 card_t* card = port_to_card(port);
253 u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */
254
255 /* Reset MSCI TX underrun and CDCD status bit */
256 sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);
257
258 if (stat & ST1_UDRN) {
259 struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
260 stats->tx_errors++; /* TX Underrun error detected */
261 stats->tx_fifo_errors++;
262 }
263
264 if (stat & ST1_CDCD)
265 hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD),
266 port_to_dev(port));
267}
268#endif
269
270
271
272static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin)
273{
274 struct net_device *dev = port_to_dev(port);
275 struct net_device_stats *stats = hdlc_stats(dev);
276 struct sk_buff *skb;
277 u16 len;
278 u32 buff;
279#ifndef ALL_PAGES_ALWAYS_MAPPED
280 u32 maxlen;
281 u8 page;
282#endif
283
284 len = readw(&desc->len);
285 skb = dev_alloc_skb(len);
286 if (!skb) {
287 stats->rx_dropped++;
288 return;
289 }
290
291 buff = buffer_offset(port, rxin, 0);
292#ifndef ALL_PAGES_ALWAYS_MAPPED
293 page = buff / winsize(card);
294 buff = buff % winsize(card);
295 maxlen = winsize(card) - buff;
296
297 openwin(card, page);
298
299 if (len > maxlen) {
300 memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
301 openwin(card, page + 1);
302 memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
303 } else
304#endif
305 memcpy_fromio(skb->data, winbase(card) + buff, len);
306
307#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
308 /* select pkt_desc table page back */
309 openwin(card, 0);
310#endif
311 skb_put(skb, len);
312#ifdef DEBUG_PKT
313 printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
314 debug_frame(skb);
315#endif
316 stats->rx_packets++;
317 stats->rx_bytes += skb->len;
318 dev->last_rx = jiffies;
319 skb->protocol = hdlc_type_trans(skb, dev);
320 netif_rx(skb);
321}
322
323
324
325/* Receive DMA interrupt service */
326static inline void sca_rx_intr(port_t *port)
327{
328 u16 dmac = get_dmac_rx(port);
329 card_t *card = port_to_card(port);
330 u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */
331 struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
332
333 /* Reset DSR status bits */
334 sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
335 DSR_RX(phy_node(port)), card);
336
337 if (stat & DSR_BOF)
338 stats->rx_over_errors++; /* Dropped one or more frames */
339
340 while (1) {
341 u32 desc_off = desc_offset(port, port->rxin, 0);
342 pkt_desc __iomem *desc;
343 u32 cda = sca_ina(dmac + CDAL, card);
344
345 if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
346 break; /* No frame received */
347
348 desc = desc_address(port, port->rxin, 0);
349 stat = readb(&desc->stat);
350 if (!(stat & ST_RX_EOM))
351 port->rxpart = 1; /* partial frame received */
352 else if ((stat & ST_ERROR_MASK) || port->rxpart) {
353 stats->rx_errors++;
354 if (stat & ST_RX_OVERRUN) stats->rx_fifo_errors++;
355 else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
356 ST_RX_RESBIT)) || port->rxpart)
357 stats->rx_frame_errors++;
358 else if (stat & ST_RX_CRC) stats->rx_crc_errors++;
359 if (stat & ST_RX_EOM)
360 port->rxpart = 0; /* received last fragment */
361 } else
362 sca_rx(card, port, desc, port->rxin);
363
364 /* Set new error descriptor address */
365 sca_outa(desc_off, dmac + EDAL, card);
366 port->rxin = next_desc(port, port->rxin, 0);
367 }
368
369 /* make sure RX DMA is enabled */
370 sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
371}
372
373
374
375/* Transmit DMA interrupt service */
376static inline void sca_tx_intr(port_t *port)
377{
378 struct net_device *dev = port_to_dev(port);
379 struct net_device_stats *stats = hdlc_stats(dev);
380 u16 dmac = get_dmac_tx(port);
381 card_t* card = port_to_card(port);
382 u8 stat;
383
384 spin_lock(&port->lock);
385
386 stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */
387
388 /* Reset DSR status bits */
389 sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
390 DSR_TX(phy_node(port)), card);
391
392 while (1) {
393 pkt_desc __iomem *desc;
394
395 u32 desc_off = desc_offset(port, port->txlast, 1);
396 u32 cda = sca_ina(dmac + CDAL, card);
397 if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
398 break; /* Transmitter is/will_be sending this frame */
399
400 desc = desc_address(port, port->txlast, 1);
401 stats->tx_packets++;
402 stats->tx_bytes += readw(&desc->len);
403 writeb(0, &desc->stat); /* Free descriptor */
404 port->txlast = next_desc(port, port->txlast, 1);
405 }
406
407 netif_wake_queue(dev);
408 spin_unlock(&port->lock);
409}
410
411
412
413static irqreturn_t sca_intr(int irq, void* dev_id, struct pt_regs *regs)
414{
415 card_t *card = dev_id;
416 int i;
417 u8 stat;
418 int handled = 0;
419
420#ifndef ALL_PAGES_ALWAYS_MAPPED
421 u8 page = sca_get_page(card);
422#endif
423
424 while((stat = sca_intr_status(card)) != 0) {
425 handled = 1;
426 for (i = 0; i < 2; i++) {
427 port_t *port = get_port(card, i);
428 if (port) {
429 if (stat & SCA_INTR_MSCI(i))
430 sca_msci_intr(port);
431
432 if (stat & SCA_INTR_DMAC_RX(i))
433 sca_rx_intr(port);
434
435 if (stat & SCA_INTR_DMAC_TX(i))
436 sca_tx_intr(port);
437 }
438 }
439 }
440
441#ifndef ALL_PAGES_ALWAYS_MAPPED
442 openwin(card, page); /* Restore original page */
443#endif
444 return IRQ_RETVAL(handled);
445}
446
447
448
449static void sca_set_port(port_t *port)
450{
451 card_t* card = port_to_card(port);
452 u16 msci = get_msci(port);
453 u8 md2 = sca_in(msci + MD2, card);
454 unsigned int tmc, br = 10, brv = 1024;
455
456
457 if (port->settings.clock_rate > 0) {
458 /* Try lower br for better accuracy*/
459 do {
460 br--;
461 brv >>= 1; /* brv = 2^9 = 512 max in specs */
462
463 /* Baud Rate = CLOCK_BASE / TMC / 2^BR */
464 tmc = CLOCK_BASE / brv / port->settings.clock_rate;
465 }while (br > 1 && tmc <= 128);
466
467 if (tmc < 1) {
468 tmc = 1;
469 br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
470 brv = 1;
471 } else if (tmc > 255)
472 tmc = 256; /* tmc=0 means 256 - low baud rates */
473
474 port->settings.clock_rate = CLOCK_BASE / brv / tmc;
475 } else {
476 br = 9; /* Minimum clock rate */
477 tmc = 256; /* 8bit = 0 */
478 port->settings.clock_rate = CLOCK_BASE / (256 * 512);
479 }
480
481 port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
482 port->txs = (port->txs & ~CLK_BRG_MASK) | br;
483 port->tmc = tmc;
484
485 /* baud divisor - time constant*/
486#ifdef __HD64570_H
487 sca_out(port->tmc, msci + TMC, card);
488#else
489 sca_out(port->tmc, msci + TMCR, card);
490 sca_out(port->tmc, msci + TMCT, card);
491#endif
492
493 /* Set BRG bits */
494 sca_out(port->rxs, msci + RXS, card);
495 sca_out(port->txs, msci + TXS, card);
496
497 if (port->settings.loopback)
498 md2 |= MD2_LOOPBACK;
499 else
500 md2 &= ~MD2_LOOPBACK;
501
502 sca_out(md2, msci + MD2, card);
503
504}
505
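/*
 * Worked instance of the divisor search in sca_set_port() above, assuming
 * (for illustration) CLOCK_BASE = 9830400 Hz (the real value is supplied
 * by the including card driver) and a requested clock_rate of 64000 bps:
 *
 *	br=9 brv=512	tmc = 9830400 / 512 / 64000 = 0		(continue)
 *	...
 *	br=2 brv=4	tmc = 9830400 / 4 / 64000   = 38	(<= 128, continue)
 *	br=1 brv=2	tmc = 9830400 / 2 / 64000   = 76	(br == 1, stop)
 *
 * The port is then programmed with TMC = 76 and BR = 1, giving an actual
 * rate of CLOCK_BASE / brv / tmc = 9830400 / 2 / 76, about 64673 bps.
 */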
506
507
508static void sca_open(struct net_device *dev)
509{
510 port_t *port = dev_to_port(dev);
511 card_t* card = port_to_card(port);
512 u16 msci = get_msci(port);
513 u8 md0, md2;
514
515 switch(port->encoding) {
516 case ENCODING_NRZ: md2 = MD2_NRZ; break;
517 case ENCODING_NRZI: md2 = MD2_NRZI; break;
518 case ENCODING_FM_MARK: md2 = MD2_FM_MARK; break;
519 case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE; break;
520 default: md2 = MD2_MANCHESTER;
521 }
522
523 if (port->settings.loopback)
524 md2 |= MD2_LOOPBACK;
525
526 switch(port->parity) {
527 case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
528 case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
529#ifdef __HD64570_H
530 case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
531#else
532 case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
533#endif
534 case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
535 default: md0 = MD0_HDLC | MD0_CRC_NONE;
536 }
537
538 sca_out(CMD_RESET, msci + CMD, card);
539 sca_out(md0, msci + MD0, card);
540 sca_out(0x00, msci + MD1, card); /* no address field check */
541 sca_out(md2, msci + MD2, card);
542 sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
543#ifdef __HD64570_H
544 sca_out(CTL_IDLE, msci + CTL, card);
545#else
546 /* Skip the rest of underrun frame */
547 sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
548#endif
549
550#ifdef __HD64570_H
551 /* Allow at least 8 bytes before requesting RX DMA operation */
552 /* TX with higher priority and possibly with shorter transfers */
553 sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
554 sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
555 sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
556#else
557 sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
558 sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
559 sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
560 sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
561 sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
562#endif
563
564/* We're using the following interrupts:
565   - TXINT (DMAC completed all transmissions, underrun or DCD change)
566 - all DMA interrupts
567*/
568
569 hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD), dev);
570
571#ifdef __HD64570_H
572 /* MSCI TX INT and RX INT A IRQ enable */
573 sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
574 sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
575 sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C),
576 IER0, card); /* TXINT and RXINT */
577 /* enable DMA IRQ */
578 sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
579 IER1, card);
580#else
581 /* MSCI TXINT and RXINTA interrupt enable */
582 sca_outl(IE0_TXINT | IE0_RXINTA | IE0_UDRN | IE0_CDCD, msci + IE0,
583 card);
584 /* DMA & MSCI IRQ enable */
585 sca_outl(sca_inl(IER0, card) |
586 (phy_node(port) ? 0x0A006600 : 0x000A0066), IER0, card);
587#endif
588
589#ifdef __HD64570_H
590 sca_out(port->tmc, msci + TMC, card); /* Restore registers */
591#else
592 sca_out(port->tmc, msci + TMCR, card);
593 sca_out(port->tmc, msci + TMCT, card);
594#endif
595 sca_out(port->rxs, msci + RXS, card);
596 sca_out(port->txs, msci + TXS, card);
597 sca_out(CMD_TX_ENABLE, msci + CMD, card);
598 sca_out(CMD_RX_ENABLE, msci + CMD, card);
599
600 netif_start_queue(dev);
601}
602
603
604
605static void sca_close(struct net_device *dev)
606{
607 port_t *port = dev_to_port(dev);
608 card_t* card = port_to_card(port);
609
610 /* reset channel */
611 sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
612#ifdef __HD64570_H
613 /* disable MSCI interrupts */
614 sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
615 IER0, card);
616 /* disable DMA interrupts */
617 sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
618 IER1, card);
619#else
620 /* disable DMA & MSCI IRQ */
621 sca_outl(sca_inl(IER0, card) &
622 (phy_node(port) ? 0x00FF00FF : 0xFF00FF00), IER0, card);
623#endif
624 netif_stop_queue(dev);
625}
626
627
628
629static int sca_attach(struct net_device *dev, unsigned short encoding,
630 unsigned short parity)
631{
632 if (encoding != ENCODING_NRZ &&
633 encoding != ENCODING_NRZI &&
634 encoding != ENCODING_FM_MARK &&
635 encoding != ENCODING_FM_SPACE &&
636 encoding != ENCODING_MANCHESTER)
637 return -EINVAL;
638
639 if (parity != PARITY_NONE &&
640 parity != PARITY_CRC16_PR0 &&
641 parity != PARITY_CRC16_PR1 &&
642#ifdef __HD64570_H
643 parity != PARITY_CRC16_PR0_CCITT &&
644#else
645 parity != PARITY_CRC32_PR1_CCITT &&
646#endif
647 parity != PARITY_CRC16_PR1_CCITT)
648 return -EINVAL;
649
650 dev_to_port(dev)->encoding = encoding;
651 dev_to_port(dev)->parity = parity;
652 return 0;
653}
654
655
656
657#ifdef DEBUG_RINGS
658static void sca_dump_rings(struct net_device *dev)
659{
660 port_t *port = dev_to_port(dev);
661 card_t *card = port_to_card(port);
662 u16 cnt;
663#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
664 u8 page;
665#endif
666
667#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
668 page = sca_get_page(card);
669 openwin(card, 0);
670#endif
671
672 printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
673 sca_ina(get_dmac_rx(port) + CDAL, card),
674 sca_ina(get_dmac_rx(port) + EDAL, card),
675 sca_in(DSR_RX(phy_node(port)), card), port->rxin,
676 sca_in(DSR_RX(phy_node(port)), card) & DSR_DE?"":"in");
677 for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
678 printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
679
680 printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
681 "last=%u %sactive",
682 sca_ina(get_dmac_tx(port) + CDAL, card),
683 sca_ina(get_dmac_tx(port) + EDAL, card),
684 sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
685 sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
686
687 for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
688 printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
689 printk("\n");
690
691 printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, "
692 "ST: %02x %02x %02x %02x"
693#ifdef __HD64572_H
694 " %02x"
695#endif
696 ", FST: %02x CST: %02x %02x\n",
697 sca_in(get_msci(port) + MD0, card),
698 sca_in(get_msci(port) + MD1, card),
699 sca_in(get_msci(port) + MD2, card),
700 sca_in(get_msci(port) + ST0, card),
701 sca_in(get_msci(port) + ST1, card),
702 sca_in(get_msci(port) + ST2, card),
703 sca_in(get_msci(port) + ST3, card),
704#ifdef __HD64572_H
705 sca_in(get_msci(port) + ST4, card),
706#endif
707 sca_in(get_msci(port) + FST, card),
708 sca_in(get_msci(port) + CST0, card),
709 sca_in(get_msci(port) + CST1, card));
710
711#ifdef __HD64572_H
712 printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
713 sca_inl(ISR0, card), sca_inl(ISR1, card));
714#else
715 printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
716 sca_in(ISR1, card), sca_in(ISR2, card));
717#endif
718
719#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
720 openwin(card, page); /* Restore original page */
721#endif
722}
723#endif /* DEBUG_RINGS */
724
725
726
727static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
728{
729 port_t *port = dev_to_port(dev);
730 card_t *card = port_to_card(port);
731 pkt_desc __iomem *desc;
732 u32 buff, len;
733#ifndef ALL_PAGES_ALWAYS_MAPPED
734 u8 page;
735 u32 maxlen;
736#endif
737
738 spin_lock_irq(&port->lock);
739
740 desc = desc_address(port, port->txin + 1, 1);
741 if (readb(&desc->stat)) { /* allow 1 packet gap */
742 /* should never happen - previous xmit should stop queue */
743#ifdef DEBUG_PKT
744 printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
745#endif
746 netif_stop_queue(dev);
747 spin_unlock_irq(&port->lock);
748 return 1; /* request packet to be queued */
749 }
750
751#ifdef DEBUG_PKT
752 printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
753 debug_frame(skb);
754#endif
755
756 desc = desc_address(port, port->txin, 1);
757 buff = buffer_offset(port, port->txin, 1);
758 len = skb->len;
759#ifndef ALL_PAGES_ALWAYS_MAPPED
760 page = buff / winsize(card);
761 buff = buff % winsize(card);
762 maxlen = winsize(card) - buff;
763
764 openwin(card, page);
765 if (len > maxlen) {
766 memcpy_toio(winbase(card) + buff, skb->data, maxlen);
767 openwin(card, page + 1);
768 memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
769 }
770 else
771#endif
772 memcpy_toio(winbase(card) + buff, skb->data, len);
773
774#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
775	openwin(card, 0);	/* switch back to the pkt_desc table page */
776#endif
777 writew(len, &desc->len);
778 writeb(ST_TX_EOM, &desc->stat);
779 dev->trans_start = jiffies;
780
781 port->txin = next_desc(port, port->txin, 1);
782 sca_outa(desc_offset(port, port->txin, 1),
783 get_dmac_tx(port) + EDAL, card);
784
785 sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
786
787 desc = desc_address(port, port->txin + 1, 1);
788 if (readb(&desc->stat)) /* allow 1 packet gap */
789 netif_stop_queue(dev);
790
791 spin_unlock_irq(&port->lock);
792
793 dev_kfree_skb(skb);
794 return 0;
795}
796
797
798
799#ifdef NEED_DETECT_RAM
800static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
801{
802	/* Round RAM size down to a multiple of 32 bits, fill from end to start */
803 u32 i = ramsize &= ~3;
804
805#ifndef ALL_PAGES_ALWAYS_MAPPED
806 u32 size = winsize(card);
807
808 openwin(card, (i - 4) / size); /* select last window */
809#endif
810 do {
811 i -= 4;
812#ifndef ALL_PAGES_ALWAYS_MAPPED
813 if ((i + 4) % size == 0)
814 openwin(card, i / size);
815 writel(i ^ 0x12345678, rambase + i % size);
816#else
817 writel(i ^ 0x12345678, rambase + i);
818#endif
819 }while (i > 0);
820
821 for (i = 0; i < ramsize ; i += 4) {
822#ifndef ALL_PAGES_ALWAYS_MAPPED
823 if (i % size == 0)
824 openwin(card, i / size);
825
826 if (readl(rambase + i % size) != (i ^ 0x12345678))
827 break;
828#else
829 if (readl(rambase + i) != (i ^ 0x12345678))
830 break;
831#endif
832 }
833
834 return i;
835}
836#endif /* NEED_DETECT_RAM */
837
838
839
840static void __devinit sca_init(card_t *card, int wait_states)
841{
842 sca_out(wait_states, WCRL, card); /* Wait Control */
843 sca_out(wait_states, WCRM, card);
844 sca_out(wait_states, WCRH, card);
845
846 sca_out(0, DMER, card); /* DMA Master disable */
847 sca_out(0x03, PCR, card); /* DMA priority */
848 sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
849 sca_out(0, DSR_TX(0), card);
850 sca_out(0, DSR_RX(1), card);
851 sca_out(0, DSR_TX(1), card);
852 sca_out(DMER_DME, DMER, card); /* DMA Master enable */
853}
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
new file mode 100644
index 000000000000..c1b6896d7007
--- /dev/null
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -0,0 +1,330 @@
1/*
2 * Generic HDLC support routines for Linux
3 * Cisco HDLC support
4 *
5 * Copyright (C) 2000 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h>
17#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h>
24#include <linux/hdlc.h>
25
26#undef DEBUG_HARD_HEADER
27
28#define CISCO_MULTICAST 0x8F /* Cisco multicast address */
29#define CISCO_UNICAST 0x0F /* Cisco unicast address */
30#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
31#define CISCO_SYS_INFO 0x2000 /* Cisco interface/system info */
32#define CISCO_ADDR_REQ 0 /* Cisco address request */
33#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
34#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
35
36
37static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
38 u16 type, void *daddr, void *saddr,
39 unsigned int len)
40{
41 hdlc_header *data;
42#ifdef DEBUG_HARD_HEADER
43 printk(KERN_DEBUG "%s: cisco_hard_header called\n", dev->name);
44#endif
45
46 skb_push(skb, sizeof(hdlc_header));
47 data = (hdlc_header*)skb->data;
48 if (type == CISCO_KEEPALIVE)
49 data->address = CISCO_MULTICAST;
50 else
51 data->address = CISCO_UNICAST;
52 data->control = 0;
53 data->protocol = htons(type);
54
55 return sizeof(hdlc_header);
56}
57
58
59
60static void cisco_keepalive_send(struct net_device *dev, u32 type,
61 u32 par1, u32 par2)
62{
63 struct sk_buff *skb;
64 cisco_packet *data;
65
66 skb = dev_alloc_skb(sizeof(hdlc_header) + sizeof(cisco_packet));
67 if (!skb) {
68 printk(KERN_WARNING
69 "%s: Memory squeeze on cisco_keepalive_send()\n",
70 dev->name);
71 return;
72 }
73 skb_reserve(skb, 4);
74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
75 data = (cisco_packet*)skb->tail;
76
77 data->type = htonl(type);
78 data->par1 = htonl(par1);
79 data->par2 = htonl(par2);
80 data->rel = 0xFFFF;
81 /* we will need do_div here if 1000 % HZ != 0 */
82 data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));
83
84 skb_put(skb, sizeof(cisco_packet));
85 skb->priority = TC_PRIO_CONTROL;
86 skb->dev = dev;
87 skb->nh.raw = skb->data;
88
89 dev_queue_xmit(skb);
90}
91
92
93
94static unsigned short cisco_type_trans(struct sk_buff *skb,
95 struct net_device *dev)
96{
97 hdlc_header *data = (hdlc_header*)skb->data;
98
99 if (skb->len < sizeof(hdlc_header))
100 return __constant_htons(ETH_P_HDLC);
101
102 if (data->address != CISCO_MULTICAST &&
103 data->address != CISCO_UNICAST)
104 return __constant_htons(ETH_P_HDLC);
105
106 switch(data->protocol) {
107 case __constant_htons(ETH_P_IP):
108 case __constant_htons(ETH_P_IPX):
109 case __constant_htons(ETH_P_IPV6):
110 skb_pull(skb, sizeof(hdlc_header));
111 return data->protocol;
112 default:
113 return __constant_htons(ETH_P_HDLC);
114 }
115}
116
117
118static int cisco_rx(struct sk_buff *skb)
119{
120 struct net_device *dev = skb->dev;
121 hdlc_device *hdlc = dev_to_hdlc(dev);
122 hdlc_header *data = (hdlc_header*)skb->data;
123 cisco_packet *cisco_data;
124 struct in_device *in_dev;
125 u32 addr, mask;
126
127 if (skb->len < sizeof(hdlc_header))
128 goto rx_error;
129
130 if (data->address != CISCO_MULTICAST &&
131 data->address != CISCO_UNICAST)
132 goto rx_error;
133
134 switch(ntohs(data->protocol)) {
135 case CISCO_SYS_INFO:
136 /* Packet is not needed, drop it. */
137 dev_kfree_skb_any(skb);
138 return NET_RX_SUCCESS;
139
140 case CISCO_KEEPALIVE:
141 if (skb->len != sizeof(hdlc_header) + CISCO_PACKET_LEN &&
142 skb->len != sizeof(hdlc_header) + CISCO_BIG_PACKET_LEN) {
143 printk(KERN_INFO "%s: Invalid length of Cisco "
144 "control packet (%d bytes)\n",
145 dev->name, skb->len);
146 goto rx_error;
147 }
148
149 cisco_data = (cisco_packet*)(skb->data + sizeof(hdlc_header));
150
151 switch(ntohl (cisco_data->type)) {
152 case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
153 in_dev = dev->ip_ptr;
154 addr = 0;
155 mask = ~0; /* is the mask correct? */
156
157 if (in_dev != NULL) {
158 struct in_ifaddr **ifap = &in_dev->ifa_list;
159
160 while (*ifap != NULL) {
161 if (strcmp(dev->name,
162 (*ifap)->ifa_label) == 0) {
163 addr = (*ifap)->ifa_local;
164 mask = (*ifap)->ifa_mask;
165 break;
166 }
167 ifap = &(*ifap)->ifa_next;
168 }
169
170 cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
171 addr, mask);
172 }
173 dev_kfree_skb_any(skb);
174 return NET_RX_SUCCESS;
175
176 case CISCO_ADDR_REPLY:
177 printk(KERN_INFO "%s: Unexpected Cisco IP address "
178 "reply\n", dev->name);
179 goto rx_error;
180
181 case CISCO_KEEPALIVE_REQ:
182 hdlc->state.cisco.rxseq = ntohl(cisco_data->par1);
183 if (hdlc->state.cisco.request_sent &&
184 ntohl(cisco_data->par2)==hdlc->state.cisco.txseq) {
185 hdlc->state.cisco.last_poll = jiffies;
186 if (!hdlc->state.cisco.up) {
187 u32 sec, min, hrs, days;
188 sec = ntohl(cisco_data->time) / 1000;
189 min = sec / 60; sec -= min * 60;
190 hrs = min / 60; min -= hrs * 60;
191 days = hrs / 24; hrs -= days * 24;
192 printk(KERN_INFO "%s: Link up (peer "
193 "uptime %ud%uh%um%us)\n",
194 dev->name, days, hrs,
195 min, sec);
196 netif_carrier_on(dev);
197 hdlc->state.cisco.up = 1;
198 }
199 }
200
201 dev_kfree_skb_any(skb);
202 return NET_RX_SUCCESS;
203 } /* switch(keepalive type) */
204 } /* switch(protocol) */
205
206 printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name,
207 data->protocol);
208 dev_kfree_skb_any(skb);
209 return NET_RX_DROP;
210
211 rx_error:
212 hdlc->stats.rx_errors++; /* Mark error */
213 dev_kfree_skb_any(skb);
214 return NET_RX_DROP;
215}
216
217
218
219static void cisco_timer(unsigned long arg)
220{
221 struct net_device *dev = (struct net_device *)arg;
222 hdlc_device *hdlc = dev_to_hdlc(dev);
223
224 if (hdlc->state.cisco.up &&
225 time_after(jiffies, hdlc->state.cisco.last_poll +
226 hdlc->state.cisco.settings.timeout * HZ)) {
227 hdlc->state.cisco.up = 0;
228 printk(KERN_INFO "%s: Link down\n", dev->name);
229 netif_carrier_off(dev);
230 }
231
232 cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ,
233 ++hdlc->state.cisco.txseq,
234 hdlc->state.cisco.rxseq);
235 hdlc->state.cisco.request_sent = 1;
236 hdlc->state.cisco.timer.expires = jiffies +
237 hdlc->state.cisco.settings.interval * HZ;
238 hdlc->state.cisco.timer.function = cisco_timer;
239 hdlc->state.cisco.timer.data = arg;
240 add_timer(&hdlc->state.cisco.timer);
241}
242
243
244
245static void cisco_start(struct net_device *dev)
246{
247 hdlc_device *hdlc = dev_to_hdlc(dev);
248 hdlc->state.cisco.up = 0;
249 hdlc->state.cisco.request_sent = 0;
250 hdlc->state.cisco.txseq = hdlc->state.cisco.rxseq = 0;
251
252 init_timer(&hdlc->state.cisco.timer);
253 hdlc->state.cisco.timer.expires = jiffies + HZ; /*First poll after 1s*/
254 hdlc->state.cisco.timer.function = cisco_timer;
255 hdlc->state.cisco.timer.data = (unsigned long)dev;
256 add_timer(&hdlc->state.cisco.timer);
257}
258
259
260
261static void cisco_stop(struct net_device *dev)
262{
263 hdlc_device *hdlc = dev_to_hdlc(dev);
264 del_timer_sync(&hdlc->state.cisco.timer);
265 if (netif_carrier_ok(dev))
266 netif_carrier_off(dev);
267 hdlc->state.cisco.up = 0;
268 hdlc->state.cisco.request_sent = 0;
269}
270
271
272
273int hdlc_cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
274{
275 cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
276 const size_t size = sizeof(cisco_proto);
277 cisco_proto new_settings;
278 hdlc_device *hdlc = dev_to_hdlc(dev);
279 int result;
280
281 switch (ifr->ifr_settings.type) {
282 case IF_GET_PROTO:
283 ifr->ifr_settings.type = IF_PROTO_CISCO;
284 if (ifr->ifr_settings.size < size) {
285 ifr->ifr_settings.size = size; /* data size wanted */
286 return -ENOBUFS;
287 }
288 if (copy_to_user(cisco_s, &hdlc->state.cisco.settings, size))
289 return -EFAULT;
290 return 0;
291
292 case IF_PROTO_CISCO:
293 if(!capable(CAP_NET_ADMIN))
294 return -EPERM;
295
296 if(dev->flags & IFF_UP)
297 return -EBUSY;
298
299 if (copy_from_user(&new_settings, cisco_s, size))
300 return -EFAULT;
301
302 if (new_settings.interval < 1 ||
303 new_settings.timeout < 2)
304 return -EINVAL;
305
306 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
307
308 if (result)
309 return result;
310
311 hdlc_proto_detach(hdlc);
312 memcpy(&hdlc->state.cisco.settings, &new_settings, size);
313 memset(&hdlc->proto, 0, sizeof(hdlc->proto));
314
315 hdlc->proto.start = cisco_start;
316 hdlc->proto.stop = cisco_stop;
317 hdlc->proto.netif_rx = cisco_rx;
318 hdlc->proto.type_trans = cisco_type_trans;
319 hdlc->proto.id = IF_PROTO_CISCO;
320 dev->hard_start_xmit = hdlc->xmit;
321 dev->hard_header = cisco_hard_header;
322 dev->hard_header_cache = NULL;
323 dev->type = ARPHRD_CISCO;
324 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
325 dev->addr_len = 0;
326 return 0;
327 }
328
329 return -EINVAL;
330}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
new file mode 100644
index 000000000000..7f450b51a6cb
--- /dev/null
+++ b/drivers/net/wan/hdlc_fr.c
@@ -0,0 +1,1237 @@
1/*
2 * Generic HDLC support routines for Linux
3 * Frame Relay support
4 *
5 * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 *
11
12 Theory of PVC state
13
14 DCE mode:
15
16 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
17 0,x -> 1,1 if "link reliable" when sending FULL STATUS
18 1,1 -> 1,0 if received FULL STATUS ACK
19
20 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
21 -> 1 when "PVC up" and (exist,new) = 1,0
22
23 DTE mode:
24 (exist,new,active) = FULL STATUS if "link reliable"
25 = 0, 0, 0 if "link unreliable"
26 No LMI:
27 active = open and "link reliable"
28 exist = new = not used
29
30*/
31
32#include <linux/module.h>
33#include <linux/kernel.h>
34#include <linux/slab.h>
35#include <linux/poll.h>
36#include <linux/errno.h>
37#include <linux/if_arp.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
40#include <linux/pkt_sched.h>
41#include <linux/random.h>
42#include <linux/inetdevice.h>
43#include <linux/lapb.h>
44#include <linux/rtnetlink.h>
45#include <linux/etherdevice.h>
46#include <linux/hdlc.h>
47
48#undef DEBUG_PKT
49#undef DEBUG_ECN
50#undef DEBUG_LINK
51
52#define MAXLEN_LMISTAT 20 /* max size of status enquiry frame */
53
54#define PVC_STATE_NEW 0x01
55#define PVC_STATE_ACTIVE 0x02
56#define PVC_STATE_FECN 0x08 /* FECN condition */
57#define PVC_STATE_BECN 0x10 /* BECN condition */
58
59
60#define FR_UI 0x03
61#define FR_PAD 0x00
62
63#define NLPID_IP 0xCC
64#define NLPID_IPV6 0x8E
65#define NLPID_SNAP 0x80
66#define NLPID_PAD 0x00
67#define NLPID_Q933 0x08
68
69
70#define LMI_DLCI 0 /* LMI DLCI */
71#define LMI_PROTO 0x08
72#define LMI_CALLREF 0x00 /* Call Reference */
73#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI lockshift */
74#define LMI_REPTYPE 1 /* report type */
75#define LMI_CCITT_REPTYPE 0x51
76#define LMI_ALIVE 3 /* keep alive */
77#define LMI_CCITT_ALIVE 0x53
78#define LMI_PVCSTAT 7 /* pvc status */
79#define LMI_CCITT_PVCSTAT 0x57
80#define LMI_FULLREP 0 /* full report */
81#define LMI_INTEGRITY 1 /* link integrity report */
82#define LMI_SINGLE 2 /* single pvc report */
83#define LMI_STATUS_ENQUIRY 0x75
84#define LMI_STATUS 0x7D /* reply */
85
86#define LMI_REPT_LEN 1 /* report type element length */
87#define LMI_INTEG_LEN 2 /* link integrity element length */
88
89#define LMI_LENGTH 13 /* standard LMI frame length */
90#define LMI_ANSI_LENGTH 14
91
92
93typedef struct {
94#if defined(__LITTLE_ENDIAN_BITFIELD)
95 unsigned ea1: 1;
96 unsigned cr: 1;
97 unsigned dlcih: 6;
98
99 unsigned ea2: 1;
100 unsigned de: 1;
101 unsigned becn: 1;
102 unsigned fecn: 1;
103 unsigned dlcil: 4;
104#else
105 unsigned dlcih: 6;
106 unsigned cr: 1;
107 unsigned ea1: 1;
108
109 unsigned dlcil: 4;
110 unsigned fecn: 1;
111 unsigned becn: 1;
112 unsigned de: 1;
113 unsigned ea2: 1;
114#endif
115}__attribute__ ((packed)) fr_hdr;
116
117
118static inline u16 q922_to_dlci(u8 *hdr)
119{
120 return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
121}
122
123
124
125static inline void dlci_to_q922(u8 *hdr, u16 dlci)
126{
127 hdr[0] = (dlci >> 2) & 0xFC;
128 hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
129}
130
131
132
133static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
134{
135 pvc_device *pvc = hdlc->state.fr.first_pvc;
136
137 while (pvc) {
138 if (pvc->dlci == dlci)
139 return pvc;
140 if (pvc->dlci > dlci)
141			return NULL;	/* the list is sorted */
142 pvc = pvc->next;
143 }
144
145 return NULL;
146}
147
148
149static inline pvc_device* add_pvc(struct net_device *dev, u16 dlci)
150{
151 hdlc_device *hdlc = dev_to_hdlc(dev);
152 pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc;
153
154 while (*pvc_p) {
155 if ((*pvc_p)->dlci == dlci)
156 return *pvc_p;
157 if ((*pvc_p)->dlci > dlci)
158 break; /* the list is sorted */
159 pvc_p = &(*pvc_p)->next;
160 }
161
162 pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC);
163 if (!pvc)
164 return NULL;
165
166 memset(pvc, 0, sizeof(pvc_device));
167 pvc->dlci = dlci;
168 pvc->master = dev;
169 pvc->next = *pvc_p; /* Put it in the chain */
170 *pvc_p = pvc;
171 return pvc;
172}
173
174
175static inline int pvc_is_used(pvc_device *pvc)
176{
177 return pvc->main != NULL || pvc->ether != NULL;
178}
179
180
181static inline void pvc_carrier(int on, pvc_device *pvc)
182{
183 if (on) {
184 if (pvc->main)
185 if (!netif_carrier_ok(pvc->main))
186 netif_carrier_on(pvc->main);
187 if (pvc->ether)
188 if (!netif_carrier_ok(pvc->ether))
189 netif_carrier_on(pvc->ether);
190 } else {
191 if (pvc->main)
192 if (netif_carrier_ok(pvc->main))
193 netif_carrier_off(pvc->main);
194 if (pvc->ether)
195 if (netif_carrier_ok(pvc->ether))
196 netif_carrier_off(pvc->ether);
197 }
198}
199
200
201static inline void delete_unused_pvcs(hdlc_device *hdlc)
202{
203 pvc_device **pvc_p = &hdlc->state.fr.first_pvc;
204
205 while (*pvc_p) {
206 if (!pvc_is_used(*pvc_p)) {
207 pvc_device *pvc = *pvc_p;
208 *pvc_p = pvc->next;
209 kfree(pvc);
210 continue;
211 }
212 pvc_p = &(*pvc_p)->next;
213 }
214}
215
216
217static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
218{
219 if (type == ARPHRD_ETHER)
220 return &pvc->ether;
221 else
222 return &pvc->main;
223}
224
225
226static inline u16 status_to_dlci(u8 *status, int *active, int *new)
227{
228 *new = (status[2] & 0x08) ? 1 : 0;
229 *active = (status[2] & 0x02) ? 1 : 0;
230
231 return ((status[0] & 0x3F) << 4) | ((status[1] & 0x78) >> 3);
232}
233
234
235static inline void dlci_to_status(u16 dlci, u8 *status, int active, int new)
236{
237 status[0] = (dlci >> 4) & 0x3F;
238 status[1] = ((dlci << 3) & 0x78) | 0x80;
239 status[2] = 0x80;
240
241 if (new)
242 status[2] |= 0x08;
243 else if (active)
244 status[2] |= 0x02;
245}
246
247
248
249static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
250{
251 u16 head_len;
252 struct sk_buff *skb = *skb_p;
253
254 switch (skb->protocol) {
255 case __constant_ntohs(ETH_P_IP):
256 head_len = 4;
257 skb_push(skb, head_len);
258 skb->data[3] = NLPID_IP;
259 break;
260
261 case __constant_ntohs(ETH_P_IPV6):
262 head_len = 4;
263 skb_push(skb, head_len);
264 skb->data[3] = NLPID_IPV6;
265 break;
266
267 case __constant_ntohs(LMI_PROTO):
268 head_len = 4;
269 skb_push(skb, head_len);
270 skb->data[3] = LMI_PROTO;
271 break;
272
273 case __constant_ntohs(ETH_P_802_3):
274 head_len = 10;
275 if (skb_headroom(skb) < head_len) {
276 struct sk_buff *skb2 = skb_realloc_headroom(skb,
277 head_len);
278 if (!skb2)
279 return -ENOBUFS;
280 dev_kfree_skb(skb);
281 skb = *skb_p = skb2;
282 }
283 skb_push(skb, head_len);
284 skb->data[3] = FR_PAD;
285 skb->data[4] = NLPID_SNAP;
286 skb->data[5] = FR_PAD;
287 skb->data[6] = 0x80;
288 skb->data[7] = 0xC2;
289 skb->data[8] = 0x00;
290 skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
291 break;
292
293 default:
294 head_len = 10;
295 skb_push(skb, head_len);
296 skb->data[3] = FR_PAD;
297 skb->data[4] = NLPID_SNAP;
298 skb->data[5] = FR_PAD;
299 skb->data[6] = FR_PAD;
300 skb->data[7] = FR_PAD;
301 *(u16*)(skb->data + 8) = skb->protocol;
302 }
303
304 dlci_to_q922(skb->data, dlci);
305 skb->data[2] = FR_UI;
306 return 0;
307}
308
309
310
311static int pvc_open(struct net_device *dev)
312{
313 pvc_device *pvc = dev_to_pvc(dev);
314
315 if ((pvc->master->flags & IFF_UP) == 0)
316 return -EIO; /* Master must be UP in order to activate PVC */
317
318 if (pvc->open_count++ == 0) {
319 hdlc_device *hdlc = dev_to_hdlc(pvc->master);
320 if (hdlc->state.fr.settings.lmi == LMI_NONE)
321 pvc->state.active = hdlc->carrier;
322
323 pvc_carrier(pvc->state.active, pvc);
324 hdlc->state.fr.dce_changed = 1;
325 }
326 return 0;
327}
328
329
330
331static int pvc_close(struct net_device *dev)
332{
333 pvc_device *pvc = dev_to_pvc(dev);
334
335 if (--pvc->open_count == 0) {
336 hdlc_device *hdlc = dev_to_hdlc(pvc->master);
337 if (hdlc->state.fr.settings.lmi == LMI_NONE)
338 pvc->state.active = 0;
339
340 if (hdlc->state.fr.settings.dce) {
341 hdlc->state.fr.dce_changed = 1;
342 pvc->state.active = 0;
343 }
344 }
345 return 0;
346}
347
348
349
350int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
351{
352 pvc_device *pvc = dev_to_pvc(dev);
353 fr_proto_pvc_info info;
354
355 if (ifr->ifr_settings.type == IF_GET_PROTO) {
356 if (dev->type == ARPHRD_ETHER)
357 ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
358 else
359 ifr->ifr_settings.type = IF_PROTO_FR_PVC;
360
361 if (ifr->ifr_settings.size < sizeof(info)) {
362 /* data size wanted */
363 ifr->ifr_settings.size = sizeof(info);
364 return -ENOBUFS;
365 }
366
367 info.dlci = pvc->dlci;
368 memcpy(info.master, pvc->master->name, IFNAMSIZ);
369 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
370 &info, sizeof(info)))
371 return -EFAULT;
372 return 0;
373 }
374
375 return -EINVAL;
376}
377
378
379static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
380{
381 return netdev_priv(dev);
382}
383
384
385
386static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
387{
388 pvc_device *pvc = dev_to_pvc(dev);
389 struct net_device_stats *stats = pvc_get_stats(dev);
390
391 if (pvc->state.active) {
392 if (dev->type == ARPHRD_ETHER) {
393 int pad = ETH_ZLEN - skb->len;
394 if (pad > 0) { /* Pad the frame with zeros */
395 int len = skb->len;
396 if (skb_tailroom(skb) < pad)
397 if (pskb_expand_head(skb, 0, pad,
398 GFP_ATOMIC)) {
399 stats->tx_dropped++;
400 dev_kfree_skb(skb);
401 return 0;
402 }
403 skb_put(skb, pad);
404 memset(skb->data + len, 0, pad);
405 }
406 skb->protocol = __constant_htons(ETH_P_802_3);
407 }
408 if (!fr_hard_header(&skb, pvc->dlci)) {
409 stats->tx_bytes += skb->len;
410 stats->tx_packets++;
411 if (pvc->state.fecn) /* TX Congestion counter */
412 stats->tx_compressed++;
413 skb->dev = pvc->master;
414 dev_queue_xmit(skb);
415 return 0;
416 }
417 }
418
419 stats->tx_dropped++;
420 dev_kfree_skb(skb);
421 return 0;
422}
423
424
425
426static int pvc_change_mtu(struct net_device *dev, int new_mtu)
427{
428 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
429 return -EINVAL;
430 dev->mtu = new_mtu;
431 return 0;
432}
433
434
435
436static inline void fr_log_dlci_active(pvc_device *pvc)
437{
438 printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
439 pvc->master->name,
440 pvc->dlci,
441 pvc->main ? pvc->main->name : "",
442 pvc->main && pvc->ether ? " " : "",
443 pvc->ether ? pvc->ether->name : "",
444 pvc->state.new ? " new" : "",
445 !pvc->state.exist ? "deleted" :
446 pvc->state.active ? "active" : "inactive");
447}
448
449
450
451static inline u8 fr_lmi_nextseq(u8 x)
452{
453 x++;
454 return x ? x : 1;
455}
456
457
458
459static void fr_lmi_send(struct net_device *dev, int fullrep)
460{
461 hdlc_device *hdlc = dev_to_hdlc(dev);
462 struct sk_buff *skb;
463 pvc_device *pvc = hdlc->state.fr.first_pvc;
464 int len = (hdlc->state.fr.settings.lmi == LMI_ANSI) ? LMI_ANSI_LENGTH
465 : LMI_LENGTH;
466 int stat_len = 3;
467 u8 *data;
468 int i = 0;
469
470 if (hdlc->state.fr.settings.dce && fullrep) {
471 len += hdlc->state.fr.dce_pvc_count * (2 + stat_len);
472 if (len > HDLC_MAX_MRU) {
473 printk(KERN_WARNING "%s: Too many PVCs while sending "
474 "LMI full report\n", dev->name);
475 return;
476 }
477 }
478
479 skb = dev_alloc_skb(len);
480 if (!skb) {
481 printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
482 dev->name);
483 return;
484 }
485 memset(skb->data, 0, len);
486 skb_reserve(skb, 4);
487 skb->protocol = __constant_htons(LMI_PROTO);
488 fr_hard_header(&skb, LMI_DLCI);
489 data = skb->tail;
490 data[i++] = LMI_CALLREF;
491 data[i++] = hdlc->state.fr.settings.dce
492 ? LMI_STATUS : LMI_STATUS_ENQUIRY;
493 if (hdlc->state.fr.settings.lmi == LMI_ANSI)
494 data[i++] = LMI_ANSI_LOCKSHIFT;
495 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT)
496 ? LMI_CCITT_REPTYPE : LMI_REPTYPE;
497 data[i++] = LMI_REPT_LEN;
498 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
499
500 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT)
501 ? LMI_CCITT_ALIVE : LMI_ALIVE;
502 data[i++] = LMI_INTEG_LEN;
503 data[i++] = hdlc->state.fr.txseq =fr_lmi_nextseq(hdlc->state.fr.txseq);
504 data[i++] = hdlc->state.fr.rxseq;
505
506 if (hdlc->state.fr.settings.dce && fullrep) {
507 while (pvc) {
508 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT)
509 ? LMI_CCITT_PVCSTAT : LMI_PVCSTAT;
510 data[i++] = stat_len;
511
512 /* LMI start/restart */
513 if (hdlc->state.fr.reliable && !pvc->state.exist) {
514 pvc->state.exist = pvc->state.new = 1;
515 fr_log_dlci_active(pvc);
516 }
517
518 /* ifconfig PVC up */
519 if (pvc->open_count && !pvc->state.active &&
520 pvc->state.exist && !pvc->state.new) {
521 pvc_carrier(1, pvc);
522 pvc->state.active = 1;
523 fr_log_dlci_active(pvc);
524 }
525
526 dlci_to_status(pvc->dlci, data + i,
527 pvc->state.active, pvc->state.new);
528 i += stat_len;
529 pvc = pvc->next;
530 }
531 }
532
533 skb_put(skb, i);
534 skb->priority = TC_PRIO_CONTROL;
535 skb->dev = dev;
536 skb->nh.raw = skb->data;
537
538 dev_queue_xmit(skb);
539}
540
541
542
543static void fr_set_link_state(int reliable, struct net_device *dev)
544{
545 hdlc_device *hdlc = dev_to_hdlc(dev);
546 pvc_device *pvc = hdlc->state.fr.first_pvc;
547
548 hdlc->state.fr.reliable = reliable;
549 if (reliable) {
550 if (!netif_carrier_ok(dev))
551 netif_carrier_on(dev);
552
553 hdlc->state.fr.n391cnt = 0; /* Request full status */
554 hdlc->state.fr.dce_changed = 1;
555
556 if (hdlc->state.fr.settings.lmi == LMI_NONE) {
557 while (pvc) { /* Activate all PVCs */
558 pvc_carrier(1, pvc);
559 pvc->state.exist = pvc->state.active = 1;
560 pvc->state.new = 0;
561 pvc = pvc->next;
562 }
563 }
564 } else {
565 if (netif_carrier_ok(dev))
566 netif_carrier_off(dev);
567
568 while (pvc) { /* Deactivate all PVCs */
569 pvc_carrier(0, pvc);
570 pvc->state.exist = pvc->state.active = 0;
571 pvc->state.new = 0;
572 pvc = pvc->next;
573 }
574 }
575}
576
577
578
579static void fr_timer(unsigned long arg)
580{
581 struct net_device *dev = (struct net_device *)arg;
582 hdlc_device *hdlc = dev_to_hdlc(dev);
583 int i, cnt = 0, reliable;
584 u32 list;
585
586 if (hdlc->state.fr.settings.dce)
587 reliable = hdlc->state.fr.request &&
588 time_before(jiffies, hdlc->state.fr.last_poll +
589 hdlc->state.fr.settings.t392 * HZ);
590 else {
591 hdlc->state.fr.last_errors <<= 1; /* Shift the list */
592 if (hdlc->state.fr.request) {
593 if (hdlc->state.fr.reliable)
594 printk(KERN_INFO "%s: No LMI status reply "
595 "received\n", dev->name);
596 hdlc->state.fr.last_errors |= 1;
597 }
598
599 list = hdlc->state.fr.last_errors;
600 for (i = 0; i < hdlc->state.fr.settings.n393; i++, list >>= 1)
601 cnt += (list & 1); /* errors count */
602
603 reliable = (cnt < hdlc->state.fr.settings.n392);
604 }
605
606 if (hdlc->state.fr.reliable != reliable) {
607 printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
608 reliable ? "" : "un");
609 fr_set_link_state(reliable, dev);
610 }
611
612 if (hdlc->state.fr.settings.dce)
613 hdlc->state.fr.timer.expires = jiffies +
614 hdlc->state.fr.settings.t392 * HZ;
615 else {
616 if (hdlc->state.fr.n391cnt)
617 hdlc->state.fr.n391cnt--;
618
619 fr_lmi_send(dev, hdlc->state.fr.n391cnt == 0);
620
621 hdlc->state.fr.last_poll = jiffies;
622 hdlc->state.fr.request = 1;
623 hdlc->state.fr.timer.expires = jiffies +
624 hdlc->state.fr.settings.t391 * HZ;
625 }
626
627 hdlc->state.fr.timer.function = fr_timer;
628 hdlc->state.fr.timer.data = arg;
629 add_timer(&hdlc->state.fr.timer);
630}
631
632
633
634static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
635{
636 hdlc_device *hdlc = dev_to_hdlc(dev);
637 int stat_len;
638 pvc_device *pvc;
639 int reptype = -1, error, no_ram;
640 u8 rxseq, txseq;
641 int i;
642
643 if (skb->len < ((hdlc->state.fr.settings.lmi == LMI_ANSI)
644 ? LMI_ANSI_LENGTH : LMI_LENGTH)) {
645 printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
646 return 1;
647 }
648
649 if (skb->data[5] != (!hdlc->state.fr.settings.dce ?
650 LMI_STATUS : LMI_STATUS_ENQUIRY)) {
651 printk(KERN_INFO "%s: LMI msgtype=%x, Not LMI status %s\n",
652 dev->name, skb->data[2],
653 hdlc->state.fr.settings.dce ? "enquiry" : "reply");
654 return 1;
655 }
656
657 i = (hdlc->state.fr.settings.lmi == LMI_ANSI) ? 7 : 6;
658
659 if (skb->data[i] !=
660 ((hdlc->state.fr.settings.lmi == LMI_CCITT)
661 ? LMI_CCITT_REPTYPE : LMI_REPTYPE)) {
662 printk(KERN_INFO "%s: Not a report type=%x\n",
663 dev->name, skb->data[i]);
664 return 1;
665 }
666 i++;
667
668 i++; /* Skip length field */
669
670 reptype = skb->data[i++];
671
672 if (skb->data[i]!=
673 ((hdlc->state.fr.settings.lmi == LMI_CCITT)
674 ? LMI_CCITT_ALIVE : LMI_ALIVE)) {
675 printk(KERN_INFO "%s: Unsupported status element=%x\n",
676 dev->name, skb->data[i]);
677 return 1;
678 }
679 i++;
680
681 i++; /* Skip length field */
682
683 hdlc->state.fr.rxseq = skb->data[i++]; /* TX sequence from peer */
684 rxseq = skb->data[i++]; /* Should confirm our sequence */
685
686 txseq = hdlc->state.fr.txseq;
687
688 if (hdlc->state.fr.settings.dce) {
689 if (reptype != LMI_FULLREP && reptype != LMI_INTEGRITY) {
690 printk(KERN_INFO "%s: Unsupported report type=%x\n",
691 dev->name, reptype);
692 return 1;
693 }
694 hdlc->state.fr.last_poll = jiffies;
695 }
696
697 error = 0;
698 if (!hdlc->state.fr.reliable)
699 error = 1;
700
701 if (rxseq == 0 || rxseq != txseq) {
702 hdlc->state.fr.n391cnt = 0; /* Ask for full report next time */
703 error = 1;
704 }
705
706 if (hdlc->state.fr.settings.dce) {
707 if (hdlc->state.fr.fullrep_sent && !error) {
708/* Stop sending full report - the last one has been confirmed by DTE */
709 hdlc->state.fr.fullrep_sent = 0;
710 pvc = hdlc->state.fr.first_pvc;
711 while (pvc) {
712 if (pvc->state.new) {
713 pvc->state.new = 0;
714
715/* Tell DTE that new PVC is now active */
716 hdlc->state.fr.dce_changed = 1;
717 }
718 pvc = pvc->next;
719 }
720 }
721
722 if (hdlc->state.fr.dce_changed) {
723 reptype = LMI_FULLREP;
724 hdlc->state.fr.fullrep_sent = 1;
725 hdlc->state.fr.dce_changed = 0;
726 }
727
728 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
729 return 0;
730 }
731
732 /* DTE */
733
734 hdlc->state.fr.request = 0; /* got response, no request pending */
735
736 if (error)
737 return 0;
738
739 if (reptype != LMI_FULLREP)
740 return 0;
741
742 stat_len = 3;
743 pvc = hdlc->state.fr.first_pvc;
744
745 while (pvc) {
746 pvc->state.deleted = 1;
747 pvc = pvc->next;
748 }
749
750 no_ram = 0;
751 while (skb->len >= i + 2 + stat_len) {
752 u16 dlci;
753 unsigned int active, new;
754
755 if (skb->data[i] != ((hdlc->state.fr.settings.lmi == LMI_CCITT)
756 ? LMI_CCITT_PVCSTAT : LMI_PVCSTAT)) {
757 printk(KERN_WARNING "%s: Invalid PVCSTAT ID: %x\n",
758 dev->name, skb->data[i]);
759 return 1;
760 }
761 i++;
762
763 if (skb->data[i] != stat_len) {
764 printk(KERN_WARNING "%s: Invalid PVCSTAT length: %x\n",
765 dev->name, skb->data[i]);
766 return 1;
767 }
768 i++;
769
770 dlci = status_to_dlci(skb->data + i, &active, &new);
771
772 pvc = add_pvc(dev, dlci);
773
774 if (!pvc && !no_ram) {
775 printk(KERN_WARNING
776 "%s: Memory squeeze on fr_lmi_recv()\n",
777 dev->name);
778 no_ram = 1;
779 }
780
781 if (pvc) {
782 pvc->state.exist = 1;
783 pvc->state.deleted = 0;
784 if (active != pvc->state.active ||
785 new != pvc->state.new ||
786 !pvc->state.exist) {
787 pvc->state.new = new;
788 pvc->state.active = active;
789 pvc_carrier(active, pvc);
790 fr_log_dlci_active(pvc);
791 }
792 }
793
794 i += stat_len;
795 }
796
797 pvc = hdlc->state.fr.first_pvc;
798
799 while (pvc) {
800 if (pvc->state.deleted && pvc->state.exist) {
801 pvc_carrier(0, pvc);
802 pvc->state.active = pvc->state.new = 0;
803 pvc->state.exist = 0;
804 fr_log_dlci_active(pvc);
805 }
806 pvc = pvc->next;
807 }
808
809 /* Next full report after N391 polls */
810 hdlc->state.fr.n391cnt = hdlc->state.fr.settings.n391;
811
812 return 0;
813}
814
815
816
817static int fr_rx(struct sk_buff *skb)
818{
819 struct net_device *ndev = skb->dev;
820 hdlc_device *hdlc = dev_to_hdlc(ndev);
821 fr_hdr *fh = (fr_hdr*)skb->data;
822 u8 *data = skb->data;
823 u16 dlci;
824 pvc_device *pvc;
825 struct net_device *dev = NULL;
826
827 if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
828 goto rx_error;
829
830 dlci = q922_to_dlci(skb->data);
831
832 if (dlci == LMI_DLCI) {
833 if (hdlc->state.fr.settings.lmi == LMI_NONE)
834 goto rx_error; /* LMI packet with no LMI? */
835
836 if (data[3] == LMI_PROTO) {
837 if (fr_lmi_recv(ndev, skb))
838 goto rx_error;
839 else {
840 dev_kfree_skb_any(skb);
841 return NET_RX_SUCCESS;
842 }
843 }
844
845 printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
846 ndev->name);
847 goto rx_error;
848 }
849
850 pvc = find_pvc(hdlc, dlci);
851 if (!pvc) {
852#ifdef DEBUG_PKT
853 printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
854 ndev->name, dlci);
855#endif
856 dev_kfree_skb_any(skb);
857 return NET_RX_DROP;
858 }
859
860 if (pvc->state.fecn != fh->fecn) {
861#ifdef DEBUG_ECN
862 printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", ndev->name,
863 dlci, fh->fecn ? "N" : "FF");
864#endif
865 pvc->state.fecn ^= 1;
866 }
867
868 if (pvc->state.becn != fh->becn) {
869#ifdef DEBUG_ECN
870 printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", ndev->name,
871 dlci, fh->becn ? "N" : "FF");
872#endif
873 pvc->state.becn ^= 1;
874 }
875
876
877 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
878 hdlc->stats.rx_dropped++;
879 return NET_RX_DROP;
880 }
881
882 if (data[3] == NLPID_IP) {
883 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
884 dev = pvc->main;
885 skb->protocol = htons(ETH_P_IP);
886
887 } else if (data[3] == NLPID_IPV6) {
888 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
889 dev = pvc->main;
890 skb->protocol = htons(ETH_P_IPV6);
891
892 } else if (skb->len > 10 && data[3] == FR_PAD &&
893 data[4] == NLPID_SNAP && data[5] == FR_PAD) {
894 u16 oui = ntohs(*(u16*)(data + 6));
895 u16 pid = ntohs(*(u16*)(data + 8));
896 skb_pull(skb, 10);
897
898 switch ((((u32)oui) << 16) | pid) {
899 case ETH_P_ARP: /* routed frame with SNAP */
900 case ETH_P_IPX:
901 case ETH_P_IP: /* a long variant */
902 case ETH_P_IPV6:
903 dev = pvc->main;
904 skb->protocol = htons(pid);
905 break;
906
907 case 0x80C20007: /* bridged Ethernet frame */
908 if ((dev = pvc->ether) != NULL)
909 skb->protocol = eth_type_trans(skb, dev);
910 break;
911
912 default:
913 printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
914 "PID=%x\n", ndev->name, oui, pid);
915 dev_kfree_skb_any(skb);
916 return NET_RX_DROP;
917 }
918 } else {
919 printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
920 "length = %i\n", ndev->name, data[3], skb->len);
921 dev_kfree_skb_any(skb);
922 return NET_RX_DROP;
923 }
924
925 if (dev) {
926 struct net_device_stats *stats = pvc_get_stats(dev);
927 stats->rx_packets++; /* PVC traffic */
928 stats->rx_bytes += skb->len;
929 if (pvc->state.becn)
930 stats->rx_compressed++;
931 skb->dev = dev;
932 netif_rx(skb);
933 return NET_RX_SUCCESS;
934 } else {
935 dev_kfree_skb_any(skb);
936 return NET_RX_DROP;
937 }
938
939 rx_error:
940 hdlc->stats.rx_errors++; /* Mark error */
941 dev_kfree_skb_any(skb);
942 return NET_RX_DROP;
943}
944
945
946
947static void fr_start(struct net_device *dev)
948{
949 hdlc_device *hdlc = dev_to_hdlc(dev);
950#ifdef DEBUG_LINK
951 printk(KERN_DEBUG "fr_start\n");
952#endif
953 if (hdlc->state.fr.settings.lmi != LMI_NONE) {
954 hdlc->state.fr.reliable = 0;
955 hdlc->state.fr.dce_changed = 1;
956 hdlc->state.fr.request = 0;
957 hdlc->state.fr.fullrep_sent = 0;
958 hdlc->state.fr.last_errors = 0xFFFFFFFF;
959 hdlc->state.fr.n391cnt = 0;
960 hdlc->state.fr.txseq = hdlc->state.fr.rxseq = 0;
961
962 init_timer(&hdlc->state.fr.timer);
963 /* First poll after 1 s */
964 hdlc->state.fr.timer.expires = jiffies + HZ;
965 hdlc->state.fr.timer.function = fr_timer;
966 hdlc->state.fr.timer.data = (unsigned long)dev;
967 add_timer(&hdlc->state.fr.timer);
968 } else
969 fr_set_link_state(1, dev);
970}
971
972
973
974static void fr_stop(struct net_device *dev)
975{
976 hdlc_device *hdlc = dev_to_hdlc(dev);
977#ifdef DEBUG_LINK
978 printk(KERN_DEBUG "fr_stop\n");
979#endif
980 if (hdlc->state.fr.settings.lmi != LMI_NONE)
981 del_timer_sync(&hdlc->state.fr.timer);
982 fr_set_link_state(0, dev);
983}
984
985
986
987static void fr_close(struct net_device *dev)
988{
989 hdlc_device *hdlc = dev_to_hdlc(dev);
990 pvc_device *pvc = hdlc->state.fr.first_pvc;
991
992 while (pvc) { /* Shutdown all PVCs for this FRAD */
993 if (pvc->main)
994 dev_close(pvc->main);
995 if (pvc->ether)
996 dev_close(pvc->ether);
997 pvc = pvc->next;
998 }
999}
1000
1001static void dlci_setup(struct net_device *dev)
1002{
1003 dev->type = ARPHRD_DLCI;
1004 dev->flags = IFF_POINTOPOINT;
1005 dev->hard_header_len = 10;
1006 dev->addr_len = 2;
1007}
1008
1009static int fr_add_pvc(struct net_device *master, unsigned int dlci, int type)
1010{
1011 hdlc_device *hdlc = dev_to_hdlc(master);
1012 pvc_device *pvc = NULL;
1013 struct net_device *dev;
1014 int result, used;
1015 char * prefix = "pvc%d";
1016
1017 if (type == ARPHRD_ETHER)
1018 prefix = "pvceth%d";
1019
1020 if ((pvc = add_pvc(master, dlci)) == NULL) {
1021 printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
1022 master->name);
1023 return -ENOBUFS;
1024 }
1025
1026 if (*get_dev_p(pvc, type))
1027 return -EEXIST;
1028
1029 used = pvc_is_used(pvc);
1030
1031 if (type == ARPHRD_ETHER)
1032 dev = alloc_netdev(sizeof(struct net_device_stats),
1033 "pvceth%d", ether_setup);
1034 else
1035 dev = alloc_netdev(sizeof(struct net_device_stats),
1036 "pvc%d", dlci_setup);
1037
1038 if (!dev) {
1039 printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
1040 master->name);
1041 delete_unused_pvcs(hdlc);
1042 return -ENOBUFS;
1043 }
1044
1045 if (type == ARPHRD_ETHER) {
1046 memcpy(dev->dev_addr, "\x00\x01", 2);
1047 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
1048 } else {
1049 *(u16*)dev->dev_addr = htons(dlci);
1050 dlci_to_q922(dev->broadcast, dlci);
1051 }
1052 dev->hard_start_xmit = pvc_xmit;
1053 dev->get_stats = pvc_get_stats;
1054 dev->open = pvc_open;
1055 dev->stop = pvc_close;
1056 dev->do_ioctl = pvc_ioctl;
1057 dev->change_mtu = pvc_change_mtu;
1058 dev->mtu = HDLC_MAX_MTU;
1059 dev->tx_queue_len = 0;
1060 dev->priv = pvc;
1061
1062 result = dev_alloc_name(dev, dev->name);
1063 if (result < 0) {
1064 free_netdev(dev);
1065 delete_unused_pvcs(hdlc);
1066 return result;
1067 }
1068
1069 if (register_netdevice(dev) != 0) {
1070 free_netdev(dev);
1071 delete_unused_pvcs(hdlc);
1072 return -EIO;
1073 }
1074
1075 dev->destructor = free_netdev;
1076 *get_dev_p(pvc, type) = dev;
1077 if (!used) {
1078 hdlc->state.fr.dce_changed = 1;
1079 hdlc->state.fr.dce_pvc_count++;
1080 }
1081 return 0;
1082}
1083
1084
1085
1086static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
1087{
1088 pvc_device *pvc;
1089 struct net_device *dev;
1090
1091 if ((pvc = find_pvc(hdlc, dlci)) == NULL)
1092 return -ENOENT;
1093
1094 if ((dev = *get_dev_p(pvc, type)) == NULL)
1095 return -ENOENT;
1096
1097 if (dev->flags & IFF_UP)
1098 return -EBUSY; /* PVC in use */
1099
1100 unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
1101 *get_dev_p(pvc, type) = NULL;
1102
1103 if (!pvc_is_used(pvc)) {
1104 hdlc->state.fr.dce_pvc_count--;
1105 hdlc->state.fr.dce_changed = 1;
1106 }
1107 delete_unused_pvcs(hdlc);
1108 return 0;
1109}
1110
1111
1112
1113static void fr_destroy(hdlc_device *hdlc)
1114{
1115 pvc_device *pvc;
1116
1117 pvc = hdlc->state.fr.first_pvc;
1118 hdlc->state.fr.first_pvc = NULL; /* All PVCs destroyed */
1119 hdlc->state.fr.dce_pvc_count = 0;
1120 hdlc->state.fr.dce_changed = 1;
1121
1122 while (pvc) {
1123 pvc_device *next = pvc->next;
1124 /* destructors will free_netdev() main and ether */
1125 if (pvc->main)
1126 unregister_netdevice(pvc->main);
1127
1128 if (pvc->ether)
1129 unregister_netdevice(pvc->ether);
1130
1131 kfree(pvc);
1132 pvc = next;
1133 }
1134}
1135
1136
1137
1138int hdlc_fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1139{
1140 fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
1141 const size_t size = sizeof(fr_proto);
1142 fr_proto new_settings;
1143 hdlc_device *hdlc = dev_to_hdlc(dev);
1144 fr_proto_pvc pvc;
1145 int result;
1146
1147 switch (ifr->ifr_settings.type) {
1148 case IF_GET_PROTO:
1149 ifr->ifr_settings.type = IF_PROTO_FR;
1150 if (ifr->ifr_settings.size < size) {
1151 ifr->ifr_settings.size = size; /* data size wanted */
1152 return -ENOBUFS;
1153 }
1154 if (copy_to_user(fr_s, &hdlc->state.fr.settings, size))
1155 return -EFAULT;
1156 return 0;
1157
1158 case IF_PROTO_FR:
1159 if(!capable(CAP_NET_ADMIN))
1160 return -EPERM;
1161
1162 if(dev->flags & IFF_UP)
1163 return -EBUSY;
1164
1165 if (copy_from_user(&new_settings, fr_s, size))
1166 return -EFAULT;
1167
1168 if (new_settings.lmi == LMI_DEFAULT)
1169 new_settings.lmi = LMI_ANSI;
1170
1171 if ((new_settings.lmi != LMI_NONE &&
1172 new_settings.lmi != LMI_ANSI &&
1173 new_settings.lmi != LMI_CCITT) ||
1174 new_settings.t391 < 1 ||
1175 new_settings.t392 < 2 ||
1176 new_settings.n391 < 1 ||
1177 new_settings.n392 < 1 ||
1178 new_settings.n393 < new_settings.n392 ||
1179 new_settings.n393 > 32 ||
1180 (new_settings.dce != 0 &&
1181 new_settings.dce != 1))
1182 return -EINVAL;
1183
1184 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
1185 if (result)
1186 return result;
1187
1188 if (hdlc->proto.id != IF_PROTO_FR) {
1189 hdlc_proto_detach(hdlc);
1190 hdlc->state.fr.first_pvc = NULL;
1191 hdlc->state.fr.dce_pvc_count = 0;
1192 }
1193 memcpy(&hdlc->state.fr.settings, &new_settings, size);
1194 memset(&hdlc->proto, 0, sizeof(hdlc->proto));
1195
1196 hdlc->proto.close = fr_close;
1197 hdlc->proto.start = fr_start;
1198 hdlc->proto.stop = fr_stop;
1199 hdlc->proto.detach = fr_destroy;
1200 hdlc->proto.netif_rx = fr_rx;
1201 hdlc->proto.id = IF_PROTO_FR;
1202 dev->hard_start_xmit = hdlc->xmit;
1203 dev->hard_header = NULL;
1204 dev->type = ARPHRD_FRAD;
1205 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
1206 dev->addr_len = 0;
1207 return 0;
1208
1209 case IF_PROTO_FR_ADD_PVC:
1210 case IF_PROTO_FR_DEL_PVC:
1211 case IF_PROTO_FR_ADD_ETH_PVC:
1212 case IF_PROTO_FR_DEL_ETH_PVC:
1213 if(!capable(CAP_NET_ADMIN))
1214 return -EPERM;
1215
1216 if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
1217 sizeof(fr_proto_pvc)))
1218 return -EFAULT;
1219
1220 if (pvc.dlci <= 0 || pvc.dlci >= 1024)
1221 return -EINVAL; /* Only 10 bits, DLCI 0 reserved */
1222
1223 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
1224 ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
1225 result = ARPHRD_ETHER; /* bridged Ethernet device */
1226 else
1227 result = ARPHRD_DLCI;
1228
1229 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
1230 ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
1231 return fr_add_pvc(dev, pvc.dlci, result);
1232 else
1233 return fr_del_pvc(hdlc, pvc.dlci, result);
1234 }
1235
1236 return -EINVAL;
1237}
diff --git a/drivers/net/wan/hdlc_generic.c b/drivers/net/wan/hdlc_generic.c
new file mode 100644
index 000000000000..6ed064cb4469
--- /dev/null
+++ b/drivers/net/wan/hdlc_generic.c
@@ -0,0 +1,343 @@
1/*
2 * Generic HDLC support routines for Linux
3 *
4 * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * Currently supported:
11 * * raw IP-in-HDLC
12 * * Cisco HDLC
13 * * Frame Relay with ANSI or CCITT LMI (both user and network side)
14 * * PPP
15 * * X.25
16 *
17 * Use sethdlc utility to set line parameters, protocol and PVCs
18 *
19 * How does it work:
20 * - proto.open(), close(), start(), stop() calls are serialized.
21 * The order is: open, [ start, stop ... ] close ...
22 * - proto.start() and stop() are called with spin_lock_irq held.
23 */
24
25#include <linux/config.h>
26#include <linux/module.h>
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/poll.h>
30#include <linux/errno.h>
31#include <linux/if_arp.h>
32#include <linux/init.h>
33#include <linux/skbuff.h>
34#include <linux/pkt_sched.h>
35#include <linux/inetdevice.h>
36#include <linux/lapb.h>
37#include <linux/rtnetlink.h>
38#include <linux/hdlc.h>
39
40
41static const char* version = "HDLC support module revision 1.17";
42
43#undef DEBUG_LINK
44
45
46static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
47{
48 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
49 return -EINVAL;
50 dev->mtu = new_mtu;
51 return 0;
52}
53
54
55
56static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
57{
58 return hdlc_stats(dev);
59}
60
61
62
63static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
64 struct packet_type *p)
65{
66 hdlc_device *hdlc = dev_to_hdlc(dev);
67 if (hdlc->proto.netif_rx)
68 return hdlc->proto.netif_rx(skb);
69
70 hdlc->stats.rx_dropped++; /* Shouldn't happen */
71 dev_kfree_skb(skb);
72 return NET_RX_DROP;
73}
74
75
76
77static void __hdlc_set_carrier_on(struct net_device *dev)
78{
79 hdlc_device *hdlc = dev_to_hdlc(dev);
80 if (hdlc->proto.start)
81 return hdlc->proto.start(dev);
82#ifdef DEBUG_LINK
83 if (netif_carrier_ok(dev))
84 printk(KERN_ERR "hdlc_set_carrier_on(): already on\n");
85#endif
86 netif_carrier_on(dev);
87}
88
89
90
91static void __hdlc_set_carrier_off(struct net_device *dev)
92{
93 hdlc_device *hdlc = dev_to_hdlc(dev);
94 if (hdlc->proto.stop)
95 return hdlc->proto.stop(dev);
96
97#ifdef DEBUG_LINK
98 if (!netif_carrier_ok(dev))
99 printk(KERN_ERR "hdlc_set_carrier_off(): already off\n");
100#endif
101 netif_carrier_off(dev);
102}
103
104
105
106void hdlc_set_carrier(int on, struct net_device *dev)
107{
108 hdlc_device *hdlc = dev_to_hdlc(dev);
109 unsigned long flags;
110 on = on ? 1 : 0;
111
112#ifdef DEBUG_LINK
113 printk(KERN_DEBUG "hdlc_set_carrier %i\n", on);
114#endif
115
116 spin_lock_irqsave(&hdlc->state_lock, flags);
117
118 if (hdlc->carrier == on)
119 goto carrier_exit; /* no change in DCD line level */
120
121#ifdef DEBUG_LINK
122 printk(KERN_INFO "%s: carrier %s\n", dev->name, on ? "ON" : "off");
123#endif
124 hdlc->carrier = on;
125
126 if (!hdlc->open)
127 goto carrier_exit;
128
129 if (hdlc->carrier)
130 __hdlc_set_carrier_on(dev);
131 else
132 __hdlc_set_carrier_off(dev);
133
134carrier_exit:
135 spin_unlock_irqrestore(&hdlc->state_lock, flags);
136}
137
138
139
140/* Must be called by hardware driver when HDLC device is being opened */
141int hdlc_open(struct net_device *dev)
142{
143 hdlc_device *hdlc = dev_to_hdlc(dev);
144#ifdef DEBUG_LINK
145 printk(KERN_DEBUG "hdlc_open() carrier %i open %i\n",
146 hdlc->carrier, hdlc->open);
147#endif
148
149 if (hdlc->proto.id == -1)
150 return -ENOSYS; /* no protocol attached */
151
152 if (hdlc->proto.open) {
153 int result = hdlc->proto.open(dev);
154 if (result)
155 return result;
156 }
157
158 spin_lock_irq(&hdlc->state_lock);
159
160 if (hdlc->carrier)
161 __hdlc_set_carrier_on(dev);
162
163 hdlc->open = 1;
164
165 spin_unlock_irq(&hdlc->state_lock);
166 return 0;
167}
168
169
170
171/* Must be called by hardware driver when HDLC device is being closed */
172void hdlc_close(struct net_device *dev)
173{
174 hdlc_device *hdlc = dev_to_hdlc(dev);
175#ifdef DEBUG_LINK
176 printk(KERN_DEBUG "hdlc_close() carrier %i open %i\n",
177 hdlc->carrier, hdlc->open);
178#endif
179
180 spin_lock_irq(&hdlc->state_lock);
181
182 hdlc->open = 0;
183 if (hdlc->carrier)
184 __hdlc_set_carrier_off(dev);
185
186 spin_unlock_irq(&hdlc->state_lock);
187
188 if (hdlc->proto.close)
189 hdlc->proto.close(dev);
190}
191
192
193
194#ifndef CONFIG_HDLC_RAW
195#define hdlc_raw_ioctl(dev, ifr) -ENOSYS
196#endif
197
198#ifndef CONFIG_HDLC_RAW_ETH
199#define hdlc_raw_eth_ioctl(dev, ifr) -ENOSYS
200#endif
201
202#ifndef CONFIG_HDLC_PPP
203#define hdlc_ppp_ioctl(dev, ifr) -ENOSYS
204#endif
205
206#ifndef CONFIG_HDLC_CISCO
207#define hdlc_cisco_ioctl(dev, ifr) -ENOSYS
208#endif
209
210#ifndef CONFIG_HDLC_FR
211#define hdlc_fr_ioctl(dev, ifr) -ENOSYS
212#endif
213
214#ifndef CONFIG_HDLC_X25
215#define hdlc_x25_ioctl(dev, ifr) -ENOSYS
216#endif
217
218
219int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
220{
221 hdlc_device *hdlc = dev_to_hdlc(dev);
222 unsigned int proto;
223
224 if (cmd != SIOCWANDEV)
225 return -EINVAL;
226
227 switch(ifr->ifr_settings.type) {
228 case IF_PROTO_HDLC:
229 case IF_PROTO_HDLC_ETH:
230 case IF_PROTO_PPP:
231 case IF_PROTO_CISCO:
232 case IF_PROTO_FR:
233 case IF_PROTO_X25:
234 proto = ifr->ifr_settings.type;
235 break;
236
237 default:
238 proto = hdlc->proto.id;
239 }
240
241 switch(proto) {
242 case IF_PROTO_HDLC: return hdlc_raw_ioctl(dev, ifr);
243 case IF_PROTO_HDLC_ETH: return hdlc_raw_eth_ioctl(dev, ifr);
244 case IF_PROTO_PPP: return hdlc_ppp_ioctl(dev, ifr);
245 case IF_PROTO_CISCO: return hdlc_cisco_ioctl(dev, ifr);
246 case IF_PROTO_FR: return hdlc_fr_ioctl(dev, ifr);
247 case IF_PROTO_X25: return hdlc_x25_ioctl(dev, ifr);
248 default: return -EINVAL;
249 }
250}
251
252static void hdlc_setup(struct net_device *dev)
253{
254 hdlc_device *hdlc = dev_to_hdlc(dev);
255
256 dev->get_stats = hdlc_get_stats;
257 dev->change_mtu = hdlc_change_mtu;
258 dev->mtu = HDLC_MAX_MTU;
259
260 dev->type = ARPHRD_RAWHDLC;
261 dev->hard_header_len = 16;
262
263 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
264
265 hdlc->proto.id = -1;
266 hdlc->proto.detach = NULL;
267 hdlc->carrier = 1;
268 hdlc->open = 0;
269 spin_lock_init(&hdlc->state_lock);
270}
271
272struct net_device *alloc_hdlcdev(void *priv)
273{
274 struct net_device *dev;
275 dev = alloc_netdev(sizeof(hdlc_device), "hdlc%d", hdlc_setup);
276 if (dev)
277 dev_to_hdlc(dev)->priv = priv;
278 return dev;
279}
280
281int register_hdlc_device(struct net_device *dev)
282{
283 int result = dev_alloc_name(dev, "hdlc%d");
284 if (result < 0)
285 return result;
286
287 result = register_netdev(dev);
288 if (result != 0)
289 return -EIO;
290
291 if (netif_carrier_ok(dev))
292 netif_carrier_off(dev); /* no carrier until DCD goes up */
293
294 return 0;
295}
296
297
298
299void unregister_hdlc_device(struct net_device *dev)
300{
301 rtnl_lock();
302 hdlc_proto_detach(dev_to_hdlc(dev));
303 unregister_netdevice(dev);
304 rtnl_unlock();
305}
306
307
308
309MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
310MODULE_DESCRIPTION("HDLC support module");
311MODULE_LICENSE("GPL v2");
312
313EXPORT_SYMBOL(hdlc_open);
314EXPORT_SYMBOL(hdlc_close);
315EXPORT_SYMBOL(hdlc_set_carrier);
316EXPORT_SYMBOL(hdlc_ioctl);
317EXPORT_SYMBOL(alloc_hdlcdev);
318EXPORT_SYMBOL(register_hdlc_device);
319EXPORT_SYMBOL(unregister_hdlc_device);
320
321static struct packet_type hdlc_packet_type = {
322 .type = __constant_htons(ETH_P_HDLC),
323 .func = hdlc_rcv,
324};
325
326
327static int __init hdlc_module_init(void)
328{
329 printk(KERN_INFO "%s\n", version);
330 dev_add_pack(&hdlc_packet_type);
331 return 0;
332}
333
334
335
336static void __exit hdlc_module_exit(void)
337{
338 dev_remove_pack(&hdlc_packet_type);
339}
340
341
342module_init(hdlc_module_init);
343module_exit(hdlc_module_exit);
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
new file mode 100644
index 000000000000..7cd6195a2e46
--- /dev/null
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -0,0 +1,115 @@
1/*
2 * Generic HDLC support routines for Linux
3 * Point-to-point protocol support
4 *
5 * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h>
17#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h>
24#include <linux/hdlc.h>
25
26
27static int ppp_open(struct net_device *dev)
28{
29 hdlc_device *hdlc = dev_to_hdlc(dev);
30 void *old_ioctl;
31 int result;
32
33 dev->priv = &hdlc->state.ppp.syncppp_ptr;
34 hdlc->state.ppp.syncppp_ptr = &hdlc->state.ppp.pppdev;
35 hdlc->state.ppp.pppdev.dev = dev;
36
37 old_ioctl = dev->do_ioctl;
38 hdlc->state.ppp.old_change_mtu = dev->change_mtu;
39 sppp_attach(&hdlc->state.ppp.pppdev);
40 /* sppp_attach nukes them. We don't need syncppp's ioctl */
41 dev->do_ioctl = old_ioctl;
42 hdlc->state.ppp.pppdev.sppp.pp_flags &= ~PP_CISCO;
43 dev->type = ARPHRD_PPP;
44 result = sppp_open(dev);
45 if (result) {
46 sppp_detach(dev);
47 return result;
48 }
49
50 return 0;
51}
52
53
54
55static void ppp_close(struct net_device *dev)
56{
57 hdlc_device *hdlc = dev_to_hdlc(dev);
58
59 sppp_close(dev);
60 sppp_detach(dev);
61 dev->rebuild_header = NULL;
62 dev->change_mtu = hdlc->state.ppp.old_change_mtu;
63 dev->mtu = HDLC_MAX_MTU;
64 dev->hard_header_len = 16;
65}
66
67
68
69static unsigned short ppp_type_trans(struct sk_buff *skb,
70 struct net_device *dev)
71{
72 return __constant_htons(ETH_P_WAN_PPP);
73}
74
75
76
77int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
78{
79 hdlc_device *hdlc = dev_to_hdlc(dev);
80 int result;
81
82 switch (ifr->ifr_settings.type) {
83 case IF_GET_PROTO:
84 ifr->ifr_settings.type = IF_PROTO_PPP;
85 return 0; /* return protocol only, no settable parameters */
86
87 case IF_PROTO_PPP:
88 if(!capable(CAP_NET_ADMIN))
89 return -EPERM;
90
91 if(dev->flags & IFF_UP)
92 return -EBUSY;
93
94 /* no settable parameters */
95
96 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
97 if (result)
98 return result;
99
100 hdlc_proto_detach(hdlc);
101 memset(&hdlc->proto, 0, sizeof(hdlc->proto));
102
103 hdlc->proto.open = ppp_open;
104 hdlc->proto.close = ppp_close;
105 hdlc->proto.type_trans = ppp_type_trans;
106 hdlc->proto.id = IF_PROTO_PPP;
107 dev->hard_start_xmit = hdlc->xmit;
108 dev->hard_header = NULL;
109 dev->type = ARPHRD_PPP;
110 dev->addr_len = 0;
111 return 0;
112 }
113
114 return -EINVAL;
115}
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
new file mode 100644
index 000000000000..c41fb70b6929
--- /dev/null
+++ b/drivers/net/wan/hdlc_raw.c
@@ -0,0 +1,90 @@
1/*
2 * Generic HDLC support routines for Linux
3 * HDLC support
4 *
5 * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h>
17#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h>
24#include <linux/hdlc.h>
25
26
27static unsigned short raw_type_trans(struct sk_buff *skb,
28 struct net_device *dev)
29{
30 return __constant_htons(ETH_P_IP);
31}
32
33
34
35int hdlc_raw_ioctl(struct net_device *dev, struct ifreq *ifr)
36{
37 raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
38 const size_t size = sizeof(raw_hdlc_proto);
39 raw_hdlc_proto new_settings;
40 hdlc_device *hdlc = dev_to_hdlc(dev);
41 int result;
42
43 switch (ifr->ifr_settings.type) {
44 case IF_GET_PROTO:
45 ifr->ifr_settings.type = IF_PROTO_HDLC;
46 if (ifr->ifr_settings.size < size) {
47 ifr->ifr_settings.size = size; /* data size wanted */
48 return -ENOBUFS;
49 }
50 if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size))
51 return -EFAULT;
52 return 0;
53
54 case IF_PROTO_HDLC:
55 if (!capable(CAP_NET_ADMIN))
56 return -EPERM;
57
58 if (dev->flags & IFF_UP)
59 return -EBUSY;
60
61 if (copy_from_user(&new_settings, raw_s, size))
62 return -EFAULT;
63
64 if (new_settings.encoding == ENCODING_DEFAULT)
65 new_settings.encoding = ENCODING_NRZ;
66
67 if (new_settings.parity == PARITY_DEFAULT)
68 new_settings.parity = PARITY_CRC16_PR1_CCITT;
69
70 result = hdlc->attach(dev, new_settings.encoding,
71 new_settings.parity);
72 if (result)
73 return result;
74
75 hdlc_proto_detach(hdlc);
76 memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size);
77 memset(&hdlc->proto, 0, sizeof(hdlc->proto));
78
79 hdlc->proto.type_trans = raw_type_trans;
80 hdlc->proto.id = IF_PROTO_HDLC;
81 dev->hard_start_xmit = hdlc->xmit;
82 dev->hard_header = NULL;
83 dev->type = ARPHRD_RAWHDLC;
84 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
85 dev->addr_len = 0;
86 return 0;
87 }
88
89 return -EINVAL;
90}
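
Unlike PPP, the raw HDLC protocol carries settable parameters: ifr_settings.ifs_ifsu.raw_hdlc points at a raw_hdlc_proto holding the requested encoding and parity, with the DEFAULT values mapped to NRZ and CRC16-PR1-CCITT as in hdlc_raw_ioctl() above. A hedged user-space sketch follows; it assumes the raw_hdlc_proto definition from linux/hdlc.h is visible to user space and uses a hypothetical interface name.

/* Sketch: attach raw HDLC with explicit line parameters, mirroring the
 * IF_PROTO_HDLC branch of hdlc_raw_ioctl() above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/hdlc.h>      /* raw_hdlc_proto, ENCODING_*, PARITY_* */
#include <linux/sockios.h>

int main(void)
{
	raw_hdlc_proto raw = {
		.encoding = ENCODING_NRZ,
		.parity   = PARITY_CRC16_PR1_CCITT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);	/* hypothetical device */

	ifr.ifr_settings.type = IF_PROTO_HDLC;
	ifr.ifr_settings.size = sizeof(raw);
	ifr.ifr_settings.ifs_ifsu.raw_hdlc = &raw;

	if (ioctl(fd, SIOCWANDEV, &ifr) < 0)
		perror("SIOCWANDEV(IF_PROTO_HDLC)");
	close(fd);
	return 0;
}
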
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
new file mode 100644
index 000000000000..b1285cc8fee6
--- /dev/null
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -0,0 +1,107 @@
1/*
2 * Generic HDLC support routines for Linux
3 * HDLC Ethernet emulation support
4 *
5 * Copyright (C) 2002-2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h>
17#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h>
21#include <linux/random.h>
22#include <linux/inetdevice.h>
23#include <linux/lapb.h>
24#include <linux/rtnetlink.h>
25#include <linux/etherdevice.h>
26#include <linux/hdlc.h>
27
28
29static int eth_tx(struct sk_buff *skb, struct net_device *dev)
30{
31 int pad = ETH_ZLEN - skb->len;
32 if (pad > 0) { /* Pad the frame with zeros */
33 int len = skb->len;
34 if (skb_tailroom(skb) < pad)
35 if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) {
36 hdlc_stats(dev)->tx_dropped++;
37 dev_kfree_skb(skb);
38 return 0;
39 }
40 skb_put(skb, pad);
41 memset(skb->data + len, 0, pad);
42 }
43 return dev_to_hdlc(dev)->xmit(skb, dev);
44}
45
46
47int hdlc_raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
48{
49 raw_hdlc_proto __user *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
50 const size_t size = sizeof(raw_hdlc_proto);
51 raw_hdlc_proto new_settings;
52 hdlc_device *hdlc = dev_to_hdlc(dev);
53 int result;
54 void *old_ch_mtu;
55 int old_qlen;
56
57 switch (ifr->ifr_settings.type) {
58 case IF_GET_PROTO:
59 ifr->ifr_settings.type = IF_PROTO_HDLC_ETH;
60 if (ifr->ifr_settings.size < size) {
61 ifr->ifr_settings.size = size; /* data size wanted */
62 return -ENOBUFS;
63 }
64 if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size))
65 return -EFAULT;
66 return 0;
67
68 case IF_PROTO_HDLC_ETH:
69 if (!capable(CAP_NET_ADMIN))
70 return -EPERM;
71
72 if (dev->flags & IFF_UP)
73 return -EBUSY;
74
75 if (copy_from_user(&new_settings, raw_s, size))
76 return -EFAULT;
77
78 if (new_settings.encoding == ENCODING_DEFAULT)
79 new_settings.encoding = ENCODING_NRZ;
80
81 if (new_settings.parity == PARITY_DEFAULT)
82 new_settings.parity = PARITY_CRC16_PR1_CCITT;
83
84 result = hdlc->attach(dev, new_settings.encoding,
85 new_settings.parity);
86 if (result)
87 return result;
88
89 hdlc_proto_detach(hdlc);
90 memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size);
91 memset(&hdlc->proto, 0, sizeof(hdlc->proto));
92
93 hdlc->proto.type_trans = eth_type_trans;
94 hdlc->proto.id = IF_PROTO_HDLC_ETH;
95 dev->hard_start_xmit = eth_tx;
96 old_ch_mtu = dev->change_mtu;
97 old_qlen = dev->tx_queue_len;
98 ether_setup(dev);
99 dev->change_mtu = old_ch_mtu;
100 dev->tx_queue_len = old_qlen;
101 memcpy(dev->dev_addr, "\x00\x01", 2);
102 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
103 return 0;
104 }
105
106 return -EINVAL;
107}
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
new file mode 100644
index 000000000000..07e5eef1fe0f
--- /dev/null
+++ b/drivers/net/wan/hdlc_x25.c
@@ -0,0 +1,219 @@
1/*
2 * Generic HDLC support routines for Linux
3 * X.25 support
4 *
5 * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/slab.h>
15#include <linux/poll.h>
16#include <linux/errno.h>
17#include <linux/if_arp.h>
18#include <linux/init.h>
19#include <linux/skbuff.h>
20#include <linux/pkt_sched.h>
21#include <linux/inetdevice.h>
22#include <linux/lapb.h>
23#include <linux/rtnetlink.h>
24#include <linux/hdlc.h>
25
26#include <net/x25device.h>
27
28/* These functions are callbacks called by LAPB layer */
29
30static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
31{
32 struct sk_buff *skb;
33 unsigned char *ptr;
34
35 if ((skb = dev_alloc_skb(1)) == NULL) {
36 printk(KERN_ERR "%s: out of memory\n", dev->name);
37 return;
38 }
39
40 ptr = skb_put(skb, 1);
41 *ptr = code;
42
43 skb->protocol = x25_type_trans(skb, dev);
44 netif_rx(skb);
45}
46
47
48
49static void x25_connected(struct net_device *dev, int reason)
50{
51 x25_connect_disconnect(dev, reason, 1);
52}
53
54
55
56static void x25_disconnected(struct net_device *dev, int reason)
57{
58 x25_connect_disconnect(dev, reason, 2);
59}
60
61
62
63static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
64{
65 unsigned char *ptr;
66
67 skb_push(skb, 1);
68
69 if (skb_cow(skb, 1))
70 return NET_RX_DROP;
71
72 ptr = skb->data;
73 *ptr = 0;
74
75 skb->protocol = x25_type_trans(skb, dev);
76 return netif_rx(skb);
77}
78
79
80
81static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
82{
83 hdlc_device *hdlc = dev_to_hdlc(dev);
84 hdlc->xmit(skb, dev); /* Ignore return value :-( */
85}
86
87
88
89static int x25_xmit(struct sk_buff *skb, struct net_device *dev)
90{
91 int result;
92
93
94 /* X.25 to LAPB */
95 switch (skb->data[0]) {
96 case 0: /* Data to be transmitted */
97 skb_pull(skb, 1);
98 if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
99 dev_kfree_skb(skb);
100 return 0;
101
102 case 1:
103 if ((result = lapb_connect_request(dev))!= LAPB_OK) {
104 if (result == LAPB_CONNECTED)
105 /* Send connect confirm. msg to level 3 */
106 x25_connected(dev, 0);
107 else
108 printk(KERN_ERR "%s: LAPB connect request "
109 "failed, error code = %i\n",
110 dev->name, result);
111 }
112 break;
113
114 case 2:
115 if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
116 if (result == LAPB_NOTCONNECTED)
117 /* Send disconnect confirm. msg to level 3 */
118 x25_disconnected(dev, 0);
119 else
120 printk(KERN_ERR "%s: LAPB disconnect request "
121 "failed, error code = %i\n",
122 dev->name, result);
123 }
124 break;
125
126 default: /* to be defined */
127 break;
128 }
129
130 dev_kfree_skb(skb);
131 return 0;
132}
133
134
135
136static int x25_open(struct net_device *dev)
137{
138 struct lapb_register_struct cb;
139 int result;
140
141 cb.connect_confirmation = x25_connected;
142 cb.connect_indication = x25_connected;
143 cb.disconnect_confirmation = x25_disconnected;
144 cb.disconnect_indication = x25_disconnected;
145 cb.data_indication = x25_data_indication;
146 cb.data_transmit = x25_data_transmit;
147
148 result = lapb_register(dev, &cb);
149 if (result != LAPB_OK)
150 return result;
151 return 0;
152}
153
154
155
156static void x25_close(struct net_device *dev)
157{
158 lapb_unregister(dev);
159}
160
161
162
163static int x25_rx(struct sk_buff *skb)
164{
165 hdlc_device *hdlc = dev_to_hdlc(skb->dev);
166
167 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
168 hdlc->stats.rx_dropped++;
169 return NET_RX_DROP;
170 }
171
172 if (lapb_data_received(skb->dev, skb) == LAPB_OK)
173 return NET_RX_SUCCESS;
174
175 hdlc->stats.rx_errors++;
176 dev_kfree_skb_any(skb);
177 return NET_RX_DROP;
178}
179
180
181
182int hdlc_x25_ioctl(struct net_device *dev, struct ifreq *ifr)
183{
184 hdlc_device *hdlc = dev_to_hdlc(dev);
185 int result;
186
187 switch (ifr->ifr_settings.type) {
188 case IF_GET_PROTO:
189 ifr->ifr_settings.type = IF_PROTO_X25;
190 return 0; /* return protocol only, no settable parameters */
191
192 case IF_PROTO_X25:
193 if(!capable(CAP_NET_ADMIN))
194 return -EPERM;
195
196 if(dev->flags & IFF_UP)
197 return -EBUSY;
198
199 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
200 if (result)
201 return result;
202
203 hdlc_proto_detach(hdlc);
204 memset(&hdlc->proto, 0, sizeof(hdlc->proto));
205
206 hdlc->proto.open = x25_open;
207 hdlc->proto.close = x25_close;
208 hdlc->proto.netif_rx = x25_rx;
209 hdlc->proto.type_trans = NULL;
210 hdlc->proto.id = IF_PROTO_X25;
211 dev->hard_start_xmit = x25_xmit;
212 dev->hard_header = NULL;
213 dev->type = ARPHRD_X25;
214 dev->addr_len = 0;
215 return 0;
216 }
217
218 return -EINVAL;
219}
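
The X.25 glue above relies on a one-byte pseudo-header in front of every frame exchanged with the packet layer: 0 carries data, 1 signals connect, 2 signals disconnect (see x25_xmit() and x25_connect_disconnect()). Below is a small illustrative sketch of that convention only; the enum and helper names are hypothetical and not part of the driver.

/* Illustrative only: the first octet of each frame passed between the
 * X.25 packet layer and this protocol driver acts as a control byte. */
enum x25_iface_byte {
	X25_IFACE_DATA       = 0x00,	/* payload follows: an X.25 packet    */
	X25_IFACE_CONNECT    = 0x01,	/* connect request / connect confirm  */
	X25_IFACE_DISCONNECT = 0x02,	/* disconnect request / confirm       */
};

static const char *x25_iface_name(unsigned char first_octet)
{
	switch (first_octet) {
	case X25_IFACE_DATA:       return "data";
	case X25_IFACE_CONNECT:    return "connect";
	case X25_IFACE_DISCONNECT: return "disconnect";
	default:                   return "reserved";
	}
}
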
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
new file mode 100644
index 000000000000..7db1d1d0bb34
--- /dev/null
+++ b/drivers/net/wan/hostess_sv11.c
@@ -0,0 +1,420 @@
1/*
2 * Comtrol SV11 card driver
3 *
4 * This is a slightly odd Z85230 synchronous driver. All you need to
5 * know basically is
6 *
  7 *	It's a genuine Z85230
8 *
9 * It supports DMA using two DMA channels in SYNC mode. The driver doesn't
10 * use these facilities
11 *
12 * The control port is at io+1, the data at io+3 and turning off the DMA
13 * is done by writing 0 to io+4
14 *
15 * The hardware does the bus handling to avoid the need for delays between
16 * touching control registers.
17 *
 18 *	Port B isn't wired (why - beats me)
19 */
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/net.h>
25#include <linux/skbuff.h>
26#include <linux/netdevice.h>
27#include <linux/if_arp.h>
28#include <linux/delay.h>
29#include <linux/ioport.h>
30#include <net/arp.h>
31
32#include <asm/io.h>
33#include <asm/dma.h>
34#include <asm/byteorder.h>
35#include <net/syncppp.h>
36#include "z85230.h"
37
38static int dma;
39
40struct sv11_device
41{
42 void *if_ptr; /* General purpose pointer (used by SPPP) */
43 struct z8530_dev sync;
44 struct ppp_device netdev;
45};
46
47/*
48 * Network driver support routines
49 */
50
51/*
52 * Frame receive. Simple for our card as we do sync ppp and there
53 * is no funny garbage involved
54 */
55
56static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
57{
58 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
59 skb_trim(skb, skb->len-2);
60 skb->protocol=__constant_htons(ETH_P_WAN_PPP);
61 skb->mac.raw=skb->data;
62 skb->dev=c->netdevice;
63 /*
64 * Send it to the PPP layer. We don't have time to process
65 * it right now.
66 */
67 netif_rx(skb);
68 c->netdevice->last_rx = jiffies;
69}
70
71/*
72 * We've been placed in the UP state
73 */
74
75static int hostess_open(struct net_device *d)
76{
77 struct sv11_device *sv11=d->priv;
78 int err = -1;
79
80 /*
81 * Link layer up
82 */
83 switch(dma)
84 {
85 case 0:
86 err=z8530_sync_open(d, &sv11->sync.chanA);
87 break;
88 case 1:
89 err=z8530_sync_dma_open(d, &sv11->sync.chanA);
90 break;
91 case 2:
92 err=z8530_sync_txdma_open(d, &sv11->sync.chanA);
93 break;
94 }
95
96 if(err)
97 return err;
98 /*
99 * Begin PPP
100 */
101 err=sppp_open(d);
102 if(err)
103 {
104 switch(dma)
105 {
106 case 0:
107 z8530_sync_close(d, &sv11->sync.chanA);
108 break;
109 case 1:
110 z8530_sync_dma_close(d, &sv11->sync.chanA);
111 break;
112 case 2:
113 z8530_sync_txdma_close(d, &sv11->sync.chanA);
114 break;
115 }
116 return err;
117 }
118 sv11->sync.chanA.rx_function=hostess_input;
119
120 /*
121 * Go go go
122 */
123
124 netif_start_queue(d);
125 return 0;
126}
127
128static int hostess_close(struct net_device *d)
129{
130 struct sv11_device *sv11=d->priv;
131 /*
132 * Discard new frames
133 */
134 sv11->sync.chanA.rx_function=z8530_null_rx;
135 /*
136 * PPP off
137 */
138 sppp_close(d);
139 /*
140 * Link layer down
141 */
142 netif_stop_queue(d);
143
144 switch(dma)
145 {
146 case 0:
147 z8530_sync_close(d, &sv11->sync.chanA);
148 break;
149 case 1:
150 z8530_sync_dma_close(d, &sv11->sync.chanA);
151 break;
152 case 2:
153 z8530_sync_txdma_close(d, &sv11->sync.chanA);
154 break;
155 }
156 return 0;
157}
158
159static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
160{
161 /* struct sv11_device *sv11=d->priv;
162 z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */
163 return sppp_do_ioctl(d, ifr,cmd);
164}
165
166static struct net_device_stats *hostess_get_stats(struct net_device *d)
167{
168 struct sv11_device *sv11=d->priv;
169 if(sv11)
170 return z8530_get_stats(&sv11->sync.chanA);
171 else
172 return NULL;
173}
174
175/*
176 * Passed PPP frames, fire them downwind.
177 */
178
179static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
180{
181 struct sv11_device *sv11=d->priv;
182 return z8530_queue_xmit(&sv11->sync.chanA, skb);
183}
184
185static int hostess_neigh_setup(struct neighbour *n)
186{
187 if (n->nud_state == NUD_NONE) {
188 n->ops = &arp_broken_ops;
189 n->output = n->ops->output;
190 }
191 return 0;
192}
193
194static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
195{
196 if (p->tbl->family == AF_INET) {
197 p->neigh_setup = hostess_neigh_setup;
198 p->ucast_probes = 0;
199 p->mcast_probes = 0;
200 }
201 return 0;
202}
203
204static void sv11_setup(struct net_device *dev)
205{
206 dev->open = hostess_open;
207 dev->stop = hostess_close;
208 dev->hard_start_xmit = hostess_queue_xmit;
209 dev->get_stats = hostess_get_stats;
210 dev->do_ioctl = hostess_ioctl;
211 dev->neigh_setup = hostess_neigh_setup_dev;
212}
213
214/*
215 * Description block for a Comtrol Hostess SV11 card
216 */
217
218static struct sv11_device *sv11_init(int iobase, int irq)
219{
220 struct z8530_dev *dev;
221 struct sv11_device *sv;
222
223 /*
224 * Get the needed I/O space
225 */
226
227 if(!request_region(iobase, 8, "Comtrol SV11"))
228 {
229 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase);
230 return NULL;
231 }
232
233 sv=(struct sv11_device *)kmalloc(sizeof(struct sv11_device), GFP_KERNEL);
234 if(!sv)
235 goto fail3;
236
237 memset(sv, 0, sizeof(*sv));
238 sv->if_ptr=&sv->netdev;
239
240 sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
241 if(!sv->netdev.dev)
242 goto fail2;
243
244 SET_MODULE_OWNER(sv->netdev.dev);
245
246 dev=&sv->sync;
247
248 /*
249 * Stuff in the I/O addressing
250 */
251
252 dev->active = 0;
253
254 dev->chanA.ctrlio=iobase+1;
255 dev->chanA.dataio=iobase+3;
256 dev->chanB.ctrlio=-1;
257 dev->chanB.dataio=-1;
258 dev->chanA.irqs=&z8530_nop;
259 dev->chanB.irqs=&z8530_nop;
260
261 outb(0, iobase+4); /* DMA off */
262
263 /* We want a fast IRQ for this device. Actually we'd like an even faster
264 IRQ ;) - This is one driver RtLinux is made for */
265
266 if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT, "Hostess SV11", dev)<0)
267 {
268 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
269 goto fail1;
270 }
271
272 dev->irq=irq;
273 dev->chanA.private=sv;
274 dev->chanA.netdevice=sv->netdev.dev;
275 dev->chanA.dev=dev;
276 dev->chanB.dev=dev;
277
278 if(dma)
279 {
280 /*
281 		 *	You can have DMA off, or DMA channels 1 and 3 - that's the lot
282 		 *	on the Comtrol.
283 */
284 dev->chanA.txdma=3;
285 dev->chanA.rxdma=1;
286 outb(0x03|0x08, iobase+4); /* DMA on */
287 if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0)
288 goto fail;
289
290 if(dma==1)
291 {
292 if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0)
293 goto dmafail;
294 }
295 }
296
297 	/* Kill our private IRQ line - the Hostess can end up chattering
298 	   until the configuration is set */
299 disable_irq(irq);
300
301 /*
302 * Begin normal initialise
303 */
304
305 if(z8530_init(dev)!=0)
306 {
307 printk(KERN_ERR "Z8530 series device not found.\n");
308 enable_irq(irq);
309 goto dmafail2;
310 }
311 z8530_channel_load(&dev->chanB, z8530_dead_port);
312 if(dev->type==Z85C30)
313 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
314 else
315 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
316
317 enable_irq(irq);
318
319
320 /*
321 * Now we can take the IRQ
322 */
323 if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0)
324 {
325 struct net_device *d=dev->chanA.netdevice;
326
327 /*
328 * Initialise the PPP components
329 */
330 sppp_attach(&sv->netdev);
331
332 /*
333 * Local fields
334 */
335
336 d->base_addr = iobase;
337 d->irq = irq;
338 d->priv = sv;
339
340 if(register_netdev(d))
341 {
342 printk(KERN_ERR "%s: unable to register device.\n",
343 d->name);
344 sppp_detach(d);
345 goto dmafail2;
346 }
347
348 z8530_describe(dev, "I/O", iobase);
349 dev->active=1;
350 return sv;
351 }
352dmafail2:
353 if(dma==1)
354 free_dma(dev->chanA.rxdma);
355dmafail:
356 if(dma)
357 free_dma(dev->chanA.txdma);
358fail:
359 free_irq(irq, dev);
360fail1:
361 free_netdev(sv->netdev.dev);
362fail2:
363 kfree(sv);
364fail3:
365 release_region(iobase,8);
366 return NULL;
367}
368
369static void sv11_shutdown(struct sv11_device *dev)
370{
371 sppp_detach(dev->netdev.dev);
372 unregister_netdev(dev->netdev.dev);
373 z8530_shutdown(&dev->sync);
374 free_irq(dev->sync.irq, dev);
375 if(dma)
376 {
377 if(dma==1)
378 free_dma(dev->sync.chanA.rxdma);
379 free_dma(dev->sync.chanA.txdma);
380 }
381 release_region(dev->sync.chanA.ctrlio-1, 8);
382 free_netdev(dev->netdev.dev);
383 kfree(dev);
384}
385
386#ifdef MODULE
387
388static int io=0x200;
389static int irq=9;
390
391module_param(io, int, 0);
392MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
393module_param(dma, int, 0);
394MODULE_PARM_DESC(dma, "Set this to 1 to use DMA1/DMA3 for TX/RX");
395module_param(irq, int, 0);
396MODULE_PARM_DESC(irq, "The interrupt line setting for the Comtrol Hostess SV11 card");
397
398MODULE_AUTHOR("Alan Cox");
399MODULE_LICENSE("GPL");
400MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
401
402static struct sv11_device *sv11_unit;
403
404int init_module(void)
405{
406 printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n");
407 printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n");
408 if((sv11_unit=sv11_init(io,irq))==NULL)
409 return -ENODEV;
410 return 0;
411}
412
413void cleanup_module(void)
414{
415 if(sv11_unit)
416 sv11_shutdown(sv11_unit);
417}
418
419#endif
420
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
new file mode 100644
index 000000000000..7f2e3653c5e5
--- /dev/null
+++ b/drivers/net/wan/lapbether.c
@@ -0,0 +1,465 @@
1/*
2 * "LAPB via ethernet" driver release 001
3 *
4 * This code REQUIRES 2.1.15 or higher/ NET3.038
5 *
6 * This module:
7 * This module is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * This is a "pseudo" network driver to allow LAPB over Ethernet.
13 *
14 * This driver can use any ethernet destination address, and can be
15 * limited to accept frames from one dedicated ethernet card only.
16 *
17 * History
18 * LAPBETH 001 Jonathan Naylor Cloned from bpqether.c
19 * 2000-10-29 Henner Eisen lapb_data_indication() return status.
20 * 2000-11-14 Henner Eisen dev_hold/put, NETDEV_GOING_DOWN support
21 */
22
23#include <linux/errno.h>
24#include <linux/types.h>
25#include <linux/socket.h>
26#include <linux/in.h>
27#include <linux/kernel.h>
28#include <linux/string.h>
29#include <linux/net.h>
30#include <linux/inet.h>
31#include <linux/netdevice.h>
32#include <linux/if_arp.h>
33#include <linux/skbuff.h>
34#include <net/sock.h>
35#include <asm/system.h>
36#include <asm/uaccess.h>
37#include <linux/mm.h>
38#include <linux/interrupt.h>
39#include <linux/notifier.h>
40#include <linux/stat.h>
41#include <linux/netfilter.h>
42#include <linux/module.h>
43#include <linux/lapb.h>
44#include <linux/init.h>
45
46#include <net/x25device.h>
47
48static char bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
49
50/* If this number is made larger, check that the temporary string buffer
51 * in lapbeth_new_device is large enough to store the probe device name.*/
52#define MAXLAPBDEV 100
53
54struct lapbethdev {
55 struct list_head node;
56 struct net_device *ethdev; /* link to ethernet device */
57 struct net_device *axdev; /* lapbeth device (lapb#) */
58 struct net_device_stats stats; /* some statistics */
59};
60
61static struct list_head lapbeth_devices = LIST_HEAD_INIT(lapbeth_devices);
62
63/* ------------------------------------------------------------------------ */
64
65/*
66 * Get the LAPB device for the ethernet device
67 */
68static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
69{
70 struct lapbethdev *lapbeth;
71
72 list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node) {
73 if (lapbeth->ethdev == dev)
74 return lapbeth;
75 }
76 return NULL;
77}
78
79static __inline__ int dev_is_ethdev(struct net_device *dev)
80{
81 return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
82}
83
84/* ------------------------------------------------------------------------ */
85
86/*
87 * Receive a LAPB frame via an ethernet interface.
88 */
89static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype)
90{
91 int len, err;
92 struct lapbethdev *lapbeth;
93
94 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
95 return NET_RX_DROP;
96
97 if (!pskb_may_pull(skb, 2))
98 goto drop;
99
100 rcu_read_lock();
101 lapbeth = lapbeth_get_x25_dev(dev);
102 if (!lapbeth)
103 goto drop_unlock;
104 if (!netif_running(lapbeth->axdev))
105 goto drop_unlock;
106
107 lapbeth->stats.rx_packets++;
108
109 len = skb->data[0] + skb->data[1] * 256;
110 lapbeth->stats.rx_bytes += len;
111
112 skb_pull(skb, 2); /* Remove the length bytes */
113 skb_trim(skb, len); /* Set the length of the data */
114
115 if ((err = lapb_data_received(lapbeth->axdev, skb)) != LAPB_OK) {
116 printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err);
117 goto drop_unlock;
118 }
119out:
120 rcu_read_unlock();
121 return 0;
122drop_unlock:
123 kfree_skb(skb);
124 goto out;
125drop:
126 kfree_skb(skb);
127 return 0;
128}
129
130static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
131{
132 unsigned char *ptr;
133
134 skb_push(skb, 1);
135
136 if (skb_cow(skb, 1))
137 return NET_RX_DROP;
138
139 ptr = skb->data;
140 *ptr = 0x00;
141
142 skb->protocol = x25_type_trans(skb, dev);
143 skb->dev->last_rx = jiffies;
144 return netif_rx(skb);
145}
146
147/*
148 * Send a LAPB frame via an ethernet interface
149 */
150static int lapbeth_xmit(struct sk_buff *skb, struct net_device *dev)
151{
152 int err = -ENODEV;
153
154 /*
155 	 * Just to be *really* sure not to send anything if the interface
156 	 * is down - the ethernet device may have gone.
157 */
158 if (!netif_running(dev)) {
159 goto drop;
160 }
161
162 switch (skb->data[0]) {
163 case 0x00:
164 err = 0;
165 break;
166 case 0x01:
167 if ((err = lapb_connect_request(dev)) != LAPB_OK)
168 printk(KERN_ERR "lapbeth: lapb_connect_request "
169 "error: %d\n", err);
170 goto drop_ok;
171 case 0x02:
172 if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
173 printk(KERN_ERR "lapbeth: lapb_disconnect_request "
174 "err: %d\n", err);
175 /* Fall thru */
176 default:
177 goto drop_ok;
178 }
179
180 skb_pull(skb, 1);
181
182 if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
183 printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err);
184 err = -ENOMEM;
185 goto drop;
186 }
187 err = 0;
188out:
189 return err;
190drop_ok:
191 err = 0;
192drop:
193 kfree_skb(skb);
194 goto out;
195}
196
197static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
198{
199 struct lapbethdev *lapbeth = netdev_priv(ndev);
200 unsigned char *ptr;
201 struct net_device *dev;
202 int size = skb->len;
203
204 skb->protocol = htons(ETH_P_X25);
205
206 ptr = skb_push(skb, 2);
207
208 *ptr++ = size % 256;
209 *ptr++ = size / 256;
210
211 lapbeth->stats.tx_packets++;
212 lapbeth->stats.tx_bytes += size;
213
214 skb->dev = dev = lapbeth->ethdev;
215
216 dev->hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
217
218 dev_queue_xmit(skb);
219}
220
221static void lapbeth_connected(struct net_device *dev, int reason)
222{
223 unsigned char *ptr;
224 struct sk_buff *skb = dev_alloc_skb(1);
225
226 if (!skb) {
227 printk(KERN_ERR "lapbeth: out of memory\n");
228 return;
229 }
230
231 ptr = skb_put(skb, 1);
232 *ptr = 0x01;
233
234 skb->protocol = x25_type_trans(skb, dev);
235 skb->dev->last_rx = jiffies;
236 netif_rx(skb);
237}
238
239static void lapbeth_disconnected(struct net_device *dev, int reason)
240{
241 unsigned char *ptr;
242 struct sk_buff *skb = dev_alloc_skb(1);
243
244 if (!skb) {
245 printk(KERN_ERR "lapbeth: out of memory\n");
246 return;
247 }
248
249 ptr = skb_put(skb, 1);
250 *ptr = 0x02;
251
252 skb->protocol = x25_type_trans(skb, dev);
253 skb->dev->last_rx = jiffies;
254 netif_rx(skb);
255}
256
257/*
258 * Statistics
259 */
260static struct net_device_stats *lapbeth_get_stats(struct net_device *dev)
261{
262 struct lapbethdev *lapbeth = netdev_priv(dev);
263 return &lapbeth->stats;
264}
265
266/*
267 * Set AX.25 callsign
268 */
269static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
270{
271 struct sockaddr *sa = addr;
272 memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
273 return 0;
274}
275
276
277static struct lapb_register_struct lapbeth_callbacks = {
278 .connect_confirmation = lapbeth_connected,
279 .connect_indication = lapbeth_connected,
280 .disconnect_confirmation = lapbeth_disconnected,
281 .disconnect_indication = lapbeth_disconnected,
282 .data_indication = lapbeth_data_indication,
283 .data_transmit = lapbeth_data_transmit,
284
285};
286
287/*
288 * open/close a device
289 */
290static int lapbeth_open(struct net_device *dev)
291{
292 int err;
293
294 if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
295 printk(KERN_ERR "lapbeth: lapb_register error - %d\n", err);
296 return -ENODEV;
297 }
298
299 netif_start_queue(dev);
300 return 0;
301}
302
303static int lapbeth_close(struct net_device *dev)
304{
305 int err;
306
307 netif_stop_queue(dev);
308
309 if ((err = lapb_unregister(dev)) != LAPB_OK)
310 printk(KERN_ERR "lapbeth: lapb_unregister error - %d\n", err);
311
312 return 0;
313}
314
315/* ------------------------------------------------------------------------ */
316
317static void lapbeth_setup(struct net_device *dev)
318{
319 dev->hard_start_xmit = lapbeth_xmit;
320 dev->open = lapbeth_open;
321 dev->stop = lapbeth_close;
322 dev->destructor = free_netdev;
323 dev->set_mac_address = lapbeth_set_mac_address;
324 dev->get_stats = lapbeth_get_stats;
325 dev->type = ARPHRD_X25;
326 dev->hard_header_len = 3;
327 dev->mtu = 1000;
328 dev->addr_len = 0;
329 SET_MODULE_OWNER(dev);
330}
331
332/*
333 * Setup a new device.
334 */
335static int lapbeth_new_device(struct net_device *dev)
336{
337 struct net_device *ndev;
338 struct lapbethdev *lapbeth;
339 int rc = -ENOMEM;
340
341 ASSERT_RTNL();
342
343 ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d",
344 lapbeth_setup);
345 if (!ndev)
346 goto out;
347
348 lapbeth = netdev_priv(ndev);
349 lapbeth->axdev = ndev;
350
351 dev_hold(dev);
352 lapbeth->ethdev = dev;
353
354 rc = dev_alloc_name(ndev, ndev->name);
355 if (rc < 0)
356 goto fail;
357
358 rc = -EIO;
359 if (register_netdevice(ndev))
360 goto fail;
361
362 list_add_rcu(&lapbeth->node, &lapbeth_devices);
363 rc = 0;
364out:
365 return rc;
366fail:
367 dev_put(dev);
368 free_netdev(ndev);
369 kfree(lapbeth);
370 goto out;
371}
372
373/*
374 * Free a lapb network device.
375 */
376static void lapbeth_free_device(struct lapbethdev *lapbeth)
377{
378 dev_put(lapbeth->ethdev);
379 list_del_rcu(&lapbeth->node);
380 unregister_netdevice(lapbeth->axdev);
381}
382
383/*
384 * Handle device status changes.
385 *
386 * Called from notifier with RTNL held.
387 */
388static int lapbeth_device_event(struct notifier_block *this,
389 unsigned long event, void *ptr)
390{
391 struct lapbethdev *lapbeth;
392 struct net_device *dev = ptr;
393
394 if (!dev_is_ethdev(dev))
395 return NOTIFY_DONE;
396
397 switch (event) {
398 case NETDEV_UP:
399 /* New ethernet device -> new LAPB interface */
400 if (lapbeth_get_x25_dev(dev) == NULL)
401 lapbeth_new_device(dev);
402 break;
403 case NETDEV_DOWN:
404 /* ethernet device closed -> close LAPB interface */
405 lapbeth = lapbeth_get_x25_dev(dev);
406 if (lapbeth)
407 dev_close(lapbeth->axdev);
408 break;
409 case NETDEV_UNREGISTER:
410 /* ethernet device disappears -> remove LAPB interface */
411 lapbeth = lapbeth_get_x25_dev(dev);
412 if (lapbeth)
413 lapbeth_free_device(lapbeth);
414 break;
415 }
416
417 return NOTIFY_DONE;
418}
419
420/* ------------------------------------------------------------------------ */
421
422static struct packet_type lapbeth_packet_type = {
423 .type = __constant_htons(ETH_P_DEC),
424 .func = lapbeth_rcv,
425};
426
427static struct notifier_block lapbeth_dev_notifier = {
428 .notifier_call = lapbeth_device_event,
429};
430
431static char banner[] __initdata = KERN_INFO "LAPB Ethernet driver version 0.02\n";
432
433static int __init lapbeth_init_driver(void)
434{
435 dev_add_pack(&lapbeth_packet_type);
436
437 register_netdevice_notifier(&lapbeth_dev_notifier);
438
439 printk(banner);
440
441 return 0;
442}
443module_init(lapbeth_init_driver);
444
445static void __exit lapbeth_cleanup_driver(void)
446{
447 struct lapbethdev *lapbeth;
448 struct list_head *entry, *tmp;
449
450 dev_remove_pack(&lapbeth_packet_type);
451 unregister_netdevice_notifier(&lapbeth_dev_notifier);
452
453 rtnl_lock();
454 list_for_each_safe(entry, tmp, &lapbeth_devices) {
455 lapbeth = list_entry(entry, struct lapbethdev, node);
456
457 unregister_netdevice(lapbeth->axdev);
458 }
459 rtnl_unlock();
460}
461module_exit(lapbeth_cleanup_driver);
462
463MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>");
464MODULE_DESCRIPTION("The unofficial LAPB over Ethernet driver");
465MODULE_LICENSE("GPL");
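
lapbether frames every LAPB packet with a two-byte length prefix, low byte first, as seen in lapbeth_data_transmit() and lapbeth_rcv() above. The following is a minimal standalone sketch of that framing; the helper names are illustrative only and do not exist in the driver.

#include <stddef.h>

/* Encode the lapbether length prefix: hdr[0] = len % 256, hdr[1] = len / 256. */
static void lapbeth_put_length(unsigned char hdr[2], size_t len)
{
	hdr[0] = len % 256;	/* low byte first  */
	hdr[1] = len / 256;	/* high byte second */
}

/* Decode it on receive, exactly as lapbeth_rcv() does. */
static size_t lapbeth_get_length(const unsigned char hdr[2])
{
	return hdr[0] + hdr[1] * 256;
}
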
diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile
new file mode 100644
index 000000000000..dabdcfed4efd
--- /dev/null
+++ b/drivers/net/wan/lmc/Makefile
@@ -0,0 +1,17 @@
1#
2# Makefile for the Lan Media 21140 based WAN cards
3# Specifically the 1000,1200,5200,5245
4#
5
6obj-$(CONFIG_LANMEDIA) += lmc.o
7
8lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o
9
10# Like above except every packet gets echoed to KERN_DEBUG
11# in hex
12#
 13# DBGDEF = \
14# -DDEBUG \
15# -DLMC_PACKET_LOG
16
17EXTRA_CFLAGS += -I. $(DBGDEF)
diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h
new file mode 100644
index 000000000000..882e58c1bfd7
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc.h
@@ -0,0 +1,33 @@
1#ifndef _LMC_H_
2#define _LMC_H_
3
4#include "lmc_var.h"
5
6/*
7 * prototypes for everyone
8 */
9int lmc_probe(struct net_device * dev);
10unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned
11 devaddr, unsigned regno);
12void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr,
13 unsigned regno, unsigned data);
14void lmc_led_on(lmc_softc_t * const, u_int32_t);
15void lmc_led_off(lmc_softc_t * const, u_int32_t);
16unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned);
17void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned);
18void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits);
19void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits);
20
21int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
22
23extern lmc_media_t lmc_ds3_media;
24extern lmc_media_t lmc_ssi_media;
25extern lmc_media_t lmc_t1_media;
26extern lmc_media_t lmc_hssi_media;
27
28#ifdef _DBG_EVENTLOG
29static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 );
30#endif
31
32#endif
33
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
new file mode 100644
index 000000000000..9dccd9546a17
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -0,0 +1,85 @@
1
2#include <linux/types.h>
3#include <linux/netdevice.h>
4#include <linux/interrupt.h>
5
6#include "lmc_debug.h"
7
8/*
9 * Prints out len, max to 80 octets using printk, 20 per line
10 */
11void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
12{
13#ifdef DEBUG
14#ifdef LMC_PACKET_LOG
15 int iNewLine = 1;
16 char str[80], *pstr;
17
18 sprintf(str, KERN_DEBUG "lmc: %s: ", type);
19 pstr = str+strlen(str);
20
21 if(iLen > 240){
22 printk(KERN_DEBUG "lmc: Printing 240 chars... out of: %d\n", iLen);
23 iLen = 240;
24 }
25 else{
26 printk(KERN_DEBUG "lmc: Printing %d chars\n", iLen);
27 }
28
29 while(iLen > 0)
30 {
31 sprintf(pstr, "%02x ", *ucData);
32 pstr+=3;
33 ucData++;
34 if( !(iNewLine % 20))
35 {
36 sprintf(pstr, "\n");
37 printk(str);
38 sprintf(str, KERN_DEBUG "lmc: %s: ", type);
39 pstr=str+strlen(str);
40 }
41 iNewLine++;
42 iLen--;
43 }
44 sprintf(pstr, "\n");
45 printk(str);
46#endif
47#endif
48}
49
50#ifdef DEBUG
51u_int32_t lmcEventLogIndex = 0;
52u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
53#endif
54
55void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3)
56{
57#ifdef DEBUG
58 lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
59 lmcEventLogBuf[lmcEventLogIndex++] = arg2;
60 lmcEventLogBuf[lmcEventLogIndex++] = arg3;
61 lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
62
63 lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
64#endif
65}
66
67void lmc_trace(struct net_device *dev, char *msg){
68#ifdef LMC_TRACE
69 unsigned long j = jiffies + 3; /* Wait for 50 ms */
70
71 if(in_interrupt()){
72 printk("%s: * %s\n", dev->name, msg);
73// while(time_before(jiffies, j+10))
74// ;
75 }
76 else {
77 printk("%s: %s\n", dev->name, msg);
78 while(time_before(jiffies, j))
79 schedule();
80 }
81#endif
82}
83
84
85/* --------------------------- end if_lmc_linux.c ------------------------ */
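
lmcEventLog() above wraps its index with a bitwise AND, which only behaves as a ring buffer because LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS is a power of two (a constraint restated in lmc_debug.h below). Here is a tiny standalone sketch of the same masking trick; the names and sizes are illustrative, not taken from the driver.

#include <stdio.h>

#define LOG_SLOTS 1024		/* must be a power of two for the mask below */

static unsigned int log_index;

static unsigned int log_advance(unsigned int step)
{
	log_index = (log_index + step) & (LOG_SLOTS - 1);	/* cheap modulo */
	return log_index;
}

int main(void)
{
	log_index = LOG_SLOTS - 2;
	printf("%u\n", log_advance(4));	/* wraps around to 2 */
	return 0;
}
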
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
new file mode 100644
index 000000000000..cf3563859bf3
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -0,0 +1,52 @@
1#ifndef _LMC_DEBUG_H_
2#define _LMC_DEBUG_H_
3
4#ifdef DEBUG
5#ifdef LMC_PACKET_LOG
6#define LMC_CONSOLE_LOG(x,y,z) lmcConsoleLog((x), (y), (z))
7#else
8#define LMC_CONSOLE_LOG(x,y,z)
9#endif
10#else
11#define LMC_CONSOLE_LOG(x,y,z)
12#endif
13
14
15
16/* Debug --- Event log definitions --- */
17/* EVENTLOGSIZE*EVENTLOGARGS needs to be a power of 2 */
18#define LMC_EVENTLOGSIZE 1024 /* number of events in eventlog */
19#define LMC_EVENTLOGARGS 4 /* number of args for each event */
20
21/* event indicators */
22#define LMC_EVENT_XMT 1
23#define LMC_EVENT_XMTEND 2
24#define LMC_EVENT_XMTINT 3
25#define LMC_EVENT_RCVINT 4
26#define LMC_EVENT_RCVEND 5
27#define LMC_EVENT_INT 6
28#define LMC_EVENT_XMTINTTMO 7
29#define LMC_EVENT_XMTPRCTMO 8
30#define LMC_EVENT_INTEND 9
31#define LMC_EVENT_RESET1 10
32#define LMC_EVENT_RESET2 11
33#define LMC_EVENT_FORCEDRESET 12
34#define LMC_EVENT_WATCHDOG 13
35#define LMC_EVENT_BADPKTSURGE 14
36#define LMC_EVENT_TBUSY0 15
37#define LMC_EVENT_TBUSY1 16
38
39
40#ifdef DEBUG
41extern u_int32_t lmcEventLogIndex;
42extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
43#define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z))
44#else
45#define LMC_EVENT_LOG(x,y,z)
46#endif /* end ifdef _DBG_EVENTLOG */
47
48void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
49void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3);
50void lmc_trace(struct net_device *dev, char *msg);
51
52#endif
diff --git a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h
new file mode 100644
index 000000000000..57dd861cd3db
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_ioctl.h
@@ -0,0 +1,257 @@
1#ifndef _LMC_IOCTL_H_
2#define _LMC_IOCTL_H_
3/* $Id: lmc_ioctl.h,v 1.15 2000/04/06 12:16:43 asj Exp $ */
4
5 /*
6 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
7 * All rights reserved. www.lanmedia.com
8 *
9 * This code is written by:
10 * Andrew Stanley-Jones (asj@cban.com)
11 * Rob Braun (bbraun@vix.com),
12 * Michael Graff (explorer@vix.com) and
13 * Matt Thomas (matt@3am-software.com).
14 *
15 * This software may be used and distributed according to the terms
16 * of the GNU General Public License version 2, incorporated herein by reference.
17 */
18
19#define LMCIOCGINFO SIOCDEVPRIVATE+3 /* get current state */
20#define LMCIOCSINFO SIOCDEVPRIVATE+4 /* set state to user values */
21#define LMCIOCGETLMCSTATS SIOCDEVPRIVATE+5
22#define LMCIOCCLEARLMCSTATS SIOCDEVPRIVATE+6
23#define LMCIOCDUMPEVENTLOG SIOCDEVPRIVATE+7
24#define LMCIOCGETXINFO SIOCDEVPRIVATE+8
25#define LMCIOCSETCIRCUIT SIOCDEVPRIVATE+9
26#define LMCIOCUNUSEDATM SIOCDEVPRIVATE+10
27#define LMCIOCRESET SIOCDEVPRIVATE+11
28#define LMCIOCT1CONTROL SIOCDEVPRIVATE+12
29#define LMCIOCIFTYPE SIOCDEVPRIVATE+13
30#define LMCIOCXILINX SIOCDEVPRIVATE+14
31
32#define LMC_CARDTYPE_UNKNOWN -1
33#define LMC_CARDTYPE_HSSI 1 /* probed card is a HSSI card */
34#define LMC_CARDTYPE_DS3 2 /* probed card is a DS3 card */
35#define LMC_CARDTYPE_SSI 3 /* probed card is a SSI card */
36#define LMC_CARDTYPE_T1 4 /* probed card is a T1 card */
37
38#define LMC_CTL_CARDTYPE_LMC5200 0 /* HSSI */
39#define LMC_CTL_CARDTYPE_LMC5245 1 /* DS3 */
40#define LMC_CTL_CARDTYPE_LMC1000 2 /* SSI, V.35 */
41#define LMC_CTL_CARDTYPE_LMC1200 3 /* DS1 */
42
43#define LMC_CTL_OFF 0 /* generic OFF value */
44#define LMC_CTL_ON 1 /* generic ON value */
45
46#define LMC_CTL_CLOCK_SOURCE_EXT 0 /* clock off line */
47#define LMC_CTL_CLOCK_SOURCE_INT 1 /* internal clock */
48
49#define LMC_CTL_CRC_LENGTH_16 16
50#define LMC_CTL_CRC_LENGTH_32 32
51#define LMC_CTL_CRC_BYTESIZE_2 2
52#define LMC_CTL_CRC_BYTESIZE_4 4
53
54
55#define LMC_CTL_CABLE_LENGTH_LT_100FT 0 /* DS3 cable < 100 feet */
56#define LMC_CTL_CABLE_LENGTH_GT_100FT 1 /* DS3 cable >= 100 feet */
57
58#define LMC_CTL_CIRCUIT_TYPE_E1 0
59#define LMC_CTL_CIRCUIT_TYPE_T1 1
60
61/*
62 * IFTYPE defines
63 */
64#define LMC_PPP 1 /* use sppp interface */
65#define LMC_NET 2 /* use direct net interface */
66#define LMC_RAW 3 /* use direct net interface */
67
68/*
69 * These are not in the least IOCTL related, but I want them common.
70 */
71/*
72 * assignments for the GPIO register on the DEC chip (common)
73 */
74#define LMC_GEP_INIT 0x01 /* 0: */
75#define LMC_GEP_RESET 0x02 /* 1: */
76#define LMC_GEP_MODE 0x10 /* 4: */
77#define LMC_GEP_DP 0x20 /* 5: */
78#define LMC_GEP_DATA 0x40 /* 6: serial out */
79#define LMC_GEP_CLK 0x80 /* 7: serial clock */
80
81/*
82 * HSSI GPIO assignments
83 */
84#define LMC_GEP_HSSI_ST 0x04 /* 2: receive timing sense (deprecated) */
85#define LMC_GEP_HSSI_CLOCK 0x08 /* 3: clock source */
86
87/*
88 * T1 GPIO assignments
89 */
90#define LMC_GEP_SSI_GENERATOR 0x04 /* 2: enable prog freq gen serial i/f */
91#define LMC_GEP_SSI_TXCLOCK 0x08 /* 3: provide clock on TXCLOCK output */
92
93/*
94 * Common MII16 bits
95 */
96#define LMC_MII16_LED0 0x0080
97#define LMC_MII16_LED1 0x0100
98#define LMC_MII16_LED2 0x0200
99#define LMC_MII16_LED3 0x0400 /* Error, and the red one */
100#define LMC_MII16_LED_ALL 0x0780 /* LED bit mask */
101#define LMC_MII16_FIFO_RESET 0x0800
102
103/*
104 * definitions for HSSI
105 */
106#define LMC_MII16_HSSI_TA 0x0001
107#define LMC_MII16_HSSI_CA 0x0002
108#define LMC_MII16_HSSI_LA 0x0004
109#define LMC_MII16_HSSI_LB 0x0008
110#define LMC_MII16_HSSI_LC 0x0010
111#define LMC_MII16_HSSI_TM 0x0020
112#define LMC_MII16_HSSI_CRC 0x0040
113
114/*
115 * assignments for the MII register 16 (DS3)
116 */
117#define LMC_MII16_DS3_ZERO 0x0001
118#define LMC_MII16_DS3_TRLBK 0x0002
119#define LMC_MII16_DS3_LNLBK 0x0004
120#define LMC_MII16_DS3_RAIS 0x0008
121#define LMC_MII16_DS3_TAIS 0x0010
122#define LMC_MII16_DS3_BIST 0x0020
123#define LMC_MII16_DS3_DLOS 0x0040
124#define LMC_MII16_DS3_CRC 0x1000
125#define LMC_MII16_DS3_SCRAM 0x2000
126#define LMC_MII16_DS3_SCRAM_LARS 0x4000
127
128/* Note: 2 pairs of LEDs were swapped by mistake
129 * in Xilinx code for DS3 & DS1 adapters */
130#define LMC_DS3_LED0 0x0100 /* bit 08 yellow */
131#define LMC_DS3_LED1 0x0080 /* bit 07 blue */
132#define LMC_DS3_LED2 0x0400 /* bit 10 green */
133#define LMC_DS3_LED3 0x0200 /* bit 09 red */
134
135/*
136 * framer register 0 and 7 (7 is latched and reset on read)
137 */
138#define LMC_FRAMER_REG0_DLOS 0x80 /* digital loss of service */
139#define LMC_FRAMER_REG0_OOFS 0x40 /* out of frame sync */
140#define LMC_FRAMER_REG0_AIS 0x20 /* alarm indication signal */
141#define LMC_FRAMER_REG0_CIS 0x10 /* channel idle */
142#define LMC_FRAMER_REG0_LOC 0x08 /* loss of clock */
143
144/*
145 * Framer register 9 contains the blue alarm signal
146 */
147#define LMC_FRAMER_REG9_RBLUE 0x02 /* Blue alarm failure */
148
149/*
150 * Framer register 0x10 contains xbit error
151 */
152#define LMC_FRAMER_REG10_XBIT 0x01 /* X bit error alarm failure */
153
154/*
155 * And SSI, LMC1000
156 */
157#define LMC_MII16_SSI_DTR 0x0001 /* DTR output RW */
158#define LMC_MII16_SSI_DSR 0x0002 /* DSR input RO */
159#define LMC_MII16_SSI_RTS 0x0004 /* RTS output RW */
160#define LMC_MII16_SSI_CTS 0x0008 /* CTS input RO */
161#define LMC_MII16_SSI_DCD 0x0010 /* DCD input RO */
162#define LMC_MII16_SSI_RI 0x0020 /* RI input RO */
163#define LMC_MII16_SSI_CRC 0x1000 /* CRC select - RW */
164
165/*
166 * bits 0x0080 through 0x0800 are generic, and described
167 * above with LMC_MII16_LED[0123] _LED_ALL, and _FIFO_RESET
168 */
169#define LMC_MII16_SSI_LL 0x1000 /* LL output RW */
170#define LMC_MII16_SSI_RL 0x2000 /* RL output RW */
171#define LMC_MII16_SSI_TM 0x4000 /* TM input RO */
172#define LMC_MII16_SSI_LOOP 0x8000 /* loopback enable RW */
173
174/*
175 * Some of the MII16 bits are mirrored in the MII17 register as well,
176 * but let's keep things separate for now, and get only the cable from
177 * the MII17.
178 */
179#define LMC_MII17_SSI_CABLE_MASK 0x0038 /* mask to extract the cable type */
180#define LMC_MII17_SSI_CABLE_SHIFT 3 /* shift to extract the cable type */
181
182/*
183 * And T1, LMC1200
184 */
185#define LMC_MII16_T1_UNUSED1 0x0003
186#define LMC_MII16_T1_XOE 0x0004
187#define LMC_MII16_T1_RST 0x0008 /* T1 chip reset - RW */
188#define LMC_MII16_T1_Z 0x0010 /* output impedance T1=1, E1=0 output - RW */
189#define LMC_MII16_T1_INTR 0x0020 /* interrupt from 8370 - RO */
190#define LMC_MII16_T1_ONESEC 0x0040 /* one second square wave - ro */
191
192#define LMC_MII16_T1_LED0 0x0100
193#define LMC_MII16_T1_LED1 0x0080
194#define LMC_MII16_T1_LED2 0x0400
195#define LMC_MII16_T1_LED3 0x0200
196#define LMC_MII16_T1_FIFO_RESET 0x0800
197
198#define LMC_MII16_T1_CRC 0x1000 /* CRC select - RW */
199#define LMC_MII16_T1_UNUSED2 0xe000
200
201
202/* 8370 framer registers */
203
204#define T1FRAMER_ALARM1_STATUS 0x47
205#define T1FRAMER_ALARM2_STATUS 0x48
206#define T1FRAMER_FERR_LSB 0x50
207#define T1FRAMER_FERR_MSB 0x51 /* framing bit error counter */
208#define T1FRAMER_LCV_LSB 0x54
209#define T1FRAMER_LCV_MSB 0x55 /* line code violation counter */
210#define T1FRAMER_AERR 0x5A
211
212/* mask for the above AERR register */
213#define T1FRAMER_LOF_MASK (0x0f0) /* receive loss of frame */
214#define T1FRAMER_COFA_MASK (0x0c0) /* change of frame alignment */
215#define T1FRAMER_SEF_MASK (0x03) /* severely errored frame */
216
217/* 8370 framer register ALM1 (0x47) values
218 * used to determine link status
219 */
220
221#define T1F_SIGFRZ 0x01 /* signaling freeze */
222#define T1F_RLOF 0x02 /* receive loss of frame alignment */
223#define T1F_RLOS 0x04 /* receive loss of signal */
224#define T1F_RALOS 0x08 /* receive analog loss of signal or RCKI loss of clock */
225#define T1F_RAIS 0x10 /* receive alarm indication signal */
226#define T1F_UNUSED 0x20
227#define T1F_RYEL 0x40 /* receive yellow alarm */
228#define T1F_RMYEL 0x80 /* receive multiframe yellow alarm */
229
230#define LMC_T1F_WRITE 0
231#define LMC_T1F_READ 1
232
233typedef struct lmc_st1f_control {
234 int command;
235 int address;
236 int value;
237 char __user *data;
238} lmc_t1f_control;
239
240enum lmc_xilinx_c {
241 lmc_xilinx_reset = 1,
242 lmc_xilinx_load_prom = 2,
243 lmc_xilinx_load = 3
244};
245
246struct lmc_xilinx_control {
247 enum lmc_xilinx_c command;
248 int len;
249 char __user *data;
250};
251
252/* ------------------ end T1 defs ------------------- */
253
254#define LMC_MII_LedMask 0x0780
255#define LMC_MII_LedBitPos 7
256
257#endif
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
new file mode 100644
index 000000000000..15e545f66cd7
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -0,0 +1,2201 @@
1 /*
2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
3 * All rights reserved. www.lanmedia.com
4 *
5 * This code is written by:
6 * Andrew Stanley-Jones (asj@cban.com)
7 * Rob Braun (bbraun@vix.com),
8 * Michael Graff (explorer@vix.com) and
9 * Matt Thomas (matt@3am-software.com).
10 *
11 * With Help By:
12 * David Boggs
13 * Ron Crane
14 * Alan Cox
15 *
16 * This software may be used and distributed according to the terms
17 * of the GNU General Public License version 2, incorporated herein by reference.
18 *
19 * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
20 *
21 * To control link specific options lmcctl is required.
22 * It can be obtained from ftp.lanmedia.com.
23 *
24 * Linux driver notes:
25 * Linux uses the device struct lmc_private to pass private information
 26 * around.
27 *
 28 * The initialization portion of this driver consists of the lmc_reset()
 29 * and lmc_dec_reset() functions, as well as the LED controls and the
 30 * lmc_initcsrs() function.
31 *
32 * The watchdog function runs every second and checks to see if
33 * we still have link, and that the timing source is what we expected
34 * it to be. If link is lost, the interface is marked down, and
35 * we no longer can transmit.
36 *
37 */
38
39/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */
40
41#include <linux/kernel.h>
42#include <linux/module.h>
43#include <linux/string.h>
44#include <linux/timer.h>
45#include <linux/ptrace.h>
46#include <linux/errno.h>
47#include <linux/ioport.h>
48#include <linux/slab.h>
49#include <linux/interrupt.h>
50#include <linux/pci.h>
51#include <linux/delay.h>
52#include <linux/init.h>
53#include <linux/in.h>
54#include <linux/if_arp.h>
55#include <linux/netdevice.h>
56#include <linux/etherdevice.h>
57#include <linux/skbuff.h>
58#include <linux/inet.h>
59#include <linux/bitops.h>
60
61#include <net/syncppp.h>
62
63#include <asm/processor.h> /* Processor type for cache alignment. */
64#include <asm/io.h>
65#include <asm/dma.h>
66#include <asm/uaccess.h>
67//#include <asm/spinlock.h>
68
69#define DRIVER_MAJOR_VERSION 1
70#define DRIVER_MINOR_VERSION 34
71#define DRIVER_SUB_VERSION 0
72
73#define DRIVER_VERSION ((DRIVER_MAJOR_VERSION << 8) + DRIVER_MINOR_VERSION)
74
75#include "lmc.h"
76#include "lmc_var.h"
77#include "lmc_ioctl.h"
78#include "lmc_debug.h"
79#include "lmc_proto.h"
80
81static int lmc_first_load = 0;
82
83static int LMC_PKT_BUF_SZ = 1542;
84
85static struct pci_device_id lmc_pci_tbl[] = {
86 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
87 PCI_VENDOR_ID_LMC, PCI_ANY_ID },
88 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
89 PCI_ANY_ID, PCI_VENDOR_ID_LMC },
90 { 0 }
91};
92
93MODULE_DEVICE_TABLE(pci, lmc_pci_tbl);
94MODULE_LICENSE("GPL");
95
96
97static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
98static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev);
99static int lmc_rx (struct net_device *dev);
100static int lmc_open(struct net_device *dev);
101static int lmc_close(struct net_device *dev);
102static struct net_device_stats *lmc_get_stats(struct net_device *dev);
103static irqreturn_t lmc_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
104static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, size_t csr_size);
105static void lmc_softreset(lmc_softc_t * const);
106static void lmc_running_reset(struct net_device *dev);
107static int lmc_ifdown(struct net_device * const);
108static void lmc_watchdog(unsigned long data);
109static void lmc_reset(lmc_softc_t * const sc);
110static void lmc_dec_reset(lmc_softc_t * const sc);
111static void lmc_driver_timeout(struct net_device *dev);
112
113/*
114 * linux reserves 16 device specific IOCTLs. We call them
115 * LMCIOC* to control various bits of our world.
116 */
117int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
118{
119 lmc_softc_t *sc;
120 lmc_ctl_t ctl;
121 int ret;
122 u_int16_t regVal;
123 unsigned long flags;
124
125 struct sppp *sp;
126
127 ret = -EOPNOTSUPP;
128
129 sc = dev->priv;
130
131 lmc_trace(dev, "lmc_ioctl in");
132
133 /*
134 * Most functions mess with the structure.
135 * Disable interrupts while we do the polling.
136 */
137 spin_lock_irqsave(&sc->lmc_lock, flags);
138
139 switch (cmd) {
140 /*
141 * Return current driver state. Since we keep this up
142 * to date internally, just copy this out to the user.
143 */
144 case LMCIOCGINFO: /*fold01*/
145 if (copy_to_user(ifr->ifr_data, &sc->ictl, sizeof (lmc_ctl_t)))
146 return -EFAULT;
147 ret = 0;
148 break;
149
150 case LMCIOCSINFO: /*fold01*/
151 sp = &((struct ppp_device *) dev)->sppp;
152 if (!capable(CAP_NET_ADMIN)) {
153 ret = -EPERM;
154 break;
155 }
156
157 if(dev->flags & IFF_UP){
158 ret = -EBUSY;
159 break;
160 }
161
162 if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
163 return -EFAULT;
164
165 sc->lmc_media->set_status (sc, &ctl);
166
167 if(ctl.crc_length != sc->ictl.crc_length) {
168 sc->lmc_media->set_crc_length(sc, ctl.crc_length);
169 if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16)
170 sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
171 else
172 sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE;
173 }
174
175 if (ctl.keepalive_onoff == LMC_CTL_OFF)
176 sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */
177 else
178 sp->pp_flags |= PP_KEEPALIVE; /* Turn on */
179
180 ret = 0;
181 break;
182
183 case LMCIOCIFTYPE: /*fold01*/
184 {
185 u_int16_t old_type = sc->if_type;
186 u_int16_t new_type;
187
188 if (!capable(CAP_NET_ADMIN)) {
189 ret = -EPERM;
190 break;
191 }
192
193 if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t)))
194 return -EFAULT;
195
196
197 if (new_type == old_type)
198 {
199 ret = 0 ;
200 break; /* no change */
201 }
202
203 lmc_proto_close(sc);
204 lmc_proto_detach(sc);
205
206 sc->if_type = new_type;
207// lmc_proto_init(sc);
208 lmc_proto_attach(sc);
209 lmc_proto_open(sc);
210
211 ret = 0 ;
212 break ;
213 }
214
215 case LMCIOCGETXINFO: /*fold01*/
216 sc->lmc_xinfo.Magic0 = 0xBEEFCAFE;
217
218 sc->lmc_xinfo.PciCardType = sc->lmc_cardtype;
219 sc->lmc_xinfo.PciSlotNumber = 0;
220 sc->lmc_xinfo.DriverMajorVersion = DRIVER_MAJOR_VERSION;
221 sc->lmc_xinfo.DriverMinorVersion = DRIVER_MINOR_VERSION;
222 sc->lmc_xinfo.DriverSubVersion = DRIVER_SUB_VERSION;
223 sc->lmc_xinfo.XilinxRevisionNumber =
224 lmc_mii_readreg (sc, 0, 3) & 0xf;
225 sc->lmc_xinfo.MaxFrameSize = LMC_PKT_BUF_SZ;
226 sc->lmc_xinfo.link_status = sc->lmc_media->get_link_status (sc);
227 sc->lmc_xinfo.mii_reg16 = lmc_mii_readreg (sc, 0, 16);
228
229 sc->lmc_xinfo.Magic1 = 0xDEADBEEF;
230
231 if (copy_to_user(ifr->ifr_data, &sc->lmc_xinfo,
232 sizeof (struct lmc_xinfo)))
233 return -EFAULT;
234 ret = 0;
235
236 break;
237
238 case LMCIOCGETLMCSTATS: /*fold01*/
239 if (sc->lmc_cardtype == LMC_CARDTYPE_T1){
240 lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB);
241 sc->stats.framingBitErrorCount +=
242 lmc_mii_readreg (sc, 0, 18) & 0xff;
243 lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB);
244 sc->stats.framingBitErrorCount +=
245 (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
246 lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB);
247 sc->stats.lineCodeViolationCount +=
248 lmc_mii_readreg (sc, 0, 18) & 0xff;
249 lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB);
250 sc->stats.lineCodeViolationCount +=
251 (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8;
252 lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR);
253 regVal = lmc_mii_readreg (sc, 0, 18) & 0xff;
254
255 sc->stats.lossOfFrameCount +=
256 (regVal & T1FRAMER_LOF_MASK) >> 4;
257 sc->stats.changeOfFrameAlignmentCount +=
258 (regVal & T1FRAMER_COFA_MASK) >> 2;
259 sc->stats.severelyErroredFrameCount +=
260 regVal & T1FRAMER_SEF_MASK;
261 }
262
263 if (copy_to_user(ifr->ifr_data, &sc->stats,
264 sizeof (struct lmc_statistics)))
265 return -EFAULT;
266
267 ret = 0;
268 break;
269
270 case LMCIOCCLEARLMCSTATS: /*fold01*/
271 if (!capable(CAP_NET_ADMIN)){
272 ret = -EPERM;
273 break;
274 }
275
276 memset (&sc->stats, 0, sizeof (struct lmc_statistics));
277 sc->stats.check = STATCHECK;
278 sc->stats.version_size = (DRIVER_VERSION << 16) +
279 sizeof (struct lmc_statistics);
280 sc->stats.lmc_cardtype = sc->lmc_cardtype;
281 ret = 0;
282 break;
283
284 case LMCIOCSETCIRCUIT: /*fold01*/
285 if (!capable(CAP_NET_ADMIN)){
286 ret = -EPERM;
287 break;
288 }
289
290 if(dev->flags & IFF_UP){
291 ret = -EBUSY;
292 break;
293 }
294
295 if (copy_from_user(&ctl, ifr->ifr_data, sizeof (lmc_ctl_t)))
296                         { ret = -EFAULT; break; }
297 sc->lmc_media->set_circuit_type(sc, ctl.circuit_type);
298 sc->ictl.circuit_type = ctl.circuit_type;
299 ret = 0;
300
301 break;
302
303 case LMCIOCRESET: /*fold01*/
304 if (!capable(CAP_NET_ADMIN)){
305 ret = -EPERM;
306 break;
307 }
308
309 /* Reset driver and bring back to current state */
310 printk (" REG16 before reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
311 lmc_running_reset (dev);
312 printk (" REG16 after reset +%04x\n", lmc_mii_readreg (sc, 0, 16));
313
314 LMC_EVENT_LOG(LMC_EVENT_FORCEDRESET, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
315
316 ret = 0;
317 break;
318
319#ifdef DEBUG
320 case LMCIOCDUMPEVENTLOG:
321 if (copy_to_user(ifr->ifr_data, &lmcEventLogIndex, sizeof (u32)))
322                     { ret = -EFAULT; break; }
323 if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf)))
324                     { ret = -EFAULT; break; }
325
326 ret = 0;
327 break;
328#endif /* DEBUG */
329 case LMCIOCT1CONTROL: /*fold01*/
330 if (sc->lmc_cardtype != LMC_CARDTYPE_T1){
331 ret = -EOPNOTSUPP;
332 break;
333 }
334 break;
335 case LMCIOCXILINX: /*fold01*/
336 {
337 struct lmc_xilinx_control xc; /*fold02*/
338
339 if (!capable(CAP_NET_ADMIN)){
340 ret = -EPERM;
341 break;
342 }
343
344 /*
345                      * Stop the transmitter while we restart the hardware
346 */
347 netif_stop_queue(dev);
348
349 if (copy_from_user(&xc, ifr->ifr_data, sizeof (struct lmc_xilinx_control)))
350                         { ret = -EFAULT; netif_wake_queue(dev); break; }
351 switch(xc.command){
352 case lmc_xilinx_reset: /*fold02*/
353 {
354 u16 mii;
355 mii = lmc_mii_readreg (sc, 0, 16);
356
357 /*
358 * Make all of them 0 and make input
359 */
360 lmc_gpio_mkinput(sc, 0xff);
361
362 /*
363 * make the reset output
364 */
365 lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
366
367 /*
368 * RESET low to force configuration. This also forces
369 * the transmitter clock to be internal, but we expect to reset
370 * that later anyway.
371 */
372
373 sc->lmc_gpio &= ~LMC_GEP_RESET;
374 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
375
376
377 /*
378 * hold for more than 10 microseconds
379 */
380 udelay(50);
381
382 sc->lmc_gpio |= LMC_GEP_RESET;
383 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
384
385
386 /*
387 * stop driving Xilinx-related signals
388 */
389 lmc_gpio_mkinput(sc, 0xff);
390
391                     /* Reset the framer hardware */
392 sc->lmc_media->set_link_status (sc, 1);
393 sc->lmc_media->set_status (sc, NULL);
394// lmc_softreset(sc);
395
396 {
397 int i;
398 for(i = 0; i < 5; i++){
399 lmc_led_on(sc, LMC_DS3_LED0);
400 mdelay(100);
401 lmc_led_off(sc, LMC_DS3_LED0);
402 lmc_led_on(sc, LMC_DS3_LED1);
403 mdelay(100);
404 lmc_led_off(sc, LMC_DS3_LED1);
405 lmc_led_on(sc, LMC_DS3_LED3);
406 mdelay(100);
407 lmc_led_off(sc, LMC_DS3_LED3);
408 lmc_led_on(sc, LMC_DS3_LED2);
409 mdelay(100);
410 lmc_led_off(sc, LMC_DS3_LED2);
411 }
412 }
413
414
415
416 ret = 0x0;
417
418 }
419
420 break;
421 case lmc_xilinx_load_prom: /*fold02*/
422 {
423 u16 mii;
424 int timeout = 500000;
425 mii = lmc_mii_readreg (sc, 0, 16);
426
427 /*
428 * Make all of them 0 and make input
429 */
430 lmc_gpio_mkinput(sc, 0xff);
431
432 /*
433 * make the reset output
434 */
435 lmc_gpio_mkoutput(sc, LMC_GEP_DP | LMC_GEP_RESET);
436
437 /*
438 * RESET low to force configuration. This also forces
439 * the transmitter clock to be internal, but we expect to reset
440 * that later anyway.
441 */
442
443 sc->lmc_gpio &= ~(LMC_GEP_RESET | LMC_GEP_DP);
444 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
445
446
447 /*
448 * hold for more than 10 microseconds
449 */
450 udelay(50);
451
452 sc->lmc_gpio |= LMC_GEP_DP | LMC_GEP_RESET;
453 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
454
455 /*
456 * busy wait for the chip to reset
457 */
458 while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
459 (timeout-- > 0))
460 ;
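                    /*
                     * Presumably LMC_GEP_INIT reflects the Xilinx INIT pin,
                     * which goes high once the device has finished clearing its
                     * configuration memory; the empty loop above simply spins
                     * until that happens or the timeout counter runs out.
                     */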
461
462
463 /*
464 * stop driving Xilinx-related signals
465 */
466 lmc_gpio_mkinput(sc, 0xff);
467
468 ret = 0x0;
469
470
471 break;
472
473 }
474
475 case lmc_xilinx_load: /*fold02*/
476 {
477 char *data;
478 int pos;
479 int timeout = 500000;
480
481 if(xc.data == 0x0){
482 ret = -EINVAL;
483 break;
484 }
485
486 data = kmalloc(xc.len, GFP_KERNEL);
487 if(data == 0x0){
488 printk(KERN_WARNING "%s: Failed to allocate memory for copy\n", dev->name);
489 ret = -ENOMEM;
490 break;
491 }
492
493 if(copy_from_user(data, xc.data, xc.len))
494 {
495 kfree(data);
496                             ret = -EFAULT;
497 break;
498 }
499
500 printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
501
502 lmc_gpio_mkinput(sc, 0xff);
503
504 /*
505                      * Clear the Xilinx and start programming from the DEC
506 */
507
508 /*
509                      * Set output as:
510 * Reset: 0 (active)
511 * DP: 0 (active)
512 * Mode: 1
513 *
514 */
515 sc->lmc_gpio = 0x00;
516 sc->lmc_gpio &= ~LMC_GEP_DP;
517 sc->lmc_gpio &= ~LMC_GEP_RESET;
518 sc->lmc_gpio |= LMC_GEP_MODE;
519 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
520
521 lmc_gpio_mkoutput(sc, LMC_GEP_MODE | LMC_GEP_DP | LMC_GEP_RESET);
522
523 /*
524                      * Wait at least 10 us, 20 to be safe
525 */
526 udelay(50);
527
528 /*
529 * Clear reset and activate programming lines
530 * Reset: Input
531 * DP: Input
532 * Clock: Output
533 * Data: Output
534 * Mode: Output
535 */
536 lmc_gpio_mkinput(sc, LMC_GEP_DP | LMC_GEP_RESET);
537
538 /*
539 * Set LOAD, DATA, Clock to 1
540 */
541 sc->lmc_gpio = 0x00;
542 sc->lmc_gpio |= LMC_GEP_MODE;
543 sc->lmc_gpio |= LMC_GEP_DATA;
544 sc->lmc_gpio |= LMC_GEP_CLK;
545 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
546
547 lmc_gpio_mkoutput(sc, LMC_GEP_DATA | LMC_GEP_CLK | LMC_GEP_MODE );
548
549 /*
550 * busy wait for the chip to reset
551 */
552 while( (LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0 &&
553 (timeout-- > 0))
554 ;
555
556                     printk(KERN_DEBUG "%s: Waited %d for the Xilinx to clear its memory\n", dev->name, 500000-timeout);
557
558 for(pos = 0; pos < xc.len; pos++){
559 switch(data[pos]){
560 case 0:
561 sc->lmc_gpio &= ~LMC_GEP_DATA; /* Data is 0 */
562 break;
563 case 1:
564 sc->lmc_gpio |= LMC_GEP_DATA; /* Data is 1 */
565 break;
566 default:
567 printk(KERN_WARNING "%s Bad data in xilinx programming data at %d, got %d wanted 0 or 1\n", dev->name, pos, data[pos]);
568 sc->lmc_gpio |= LMC_GEP_DATA; /* Assume it's 1 */
569 }
570 sc->lmc_gpio &= ~LMC_GEP_CLK; /* Clock to zero */
571 sc->lmc_gpio |= LMC_GEP_MODE;
572 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
573 udelay(1);
574
575                         sc->lmc_gpio |= LMC_GEP_CLK; /* Put the clock back to one */
576 sc->lmc_gpio |= LMC_GEP_MODE;
577 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
578 udelay(1);
579 }
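                    /*
                     * The loop above bit-bangs the configuration bitstream into
                     * the Xilinx: each byte of data[] is expected to hold a
                     * single bit (0 or 1), which is placed on GEP_DATA and then
                     * clocked in on the low-to-high transition of GEP_CLK while
                     * MODE stays asserted.
                     */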
580 if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_INIT) == 0){
581 printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (corrupted data)\n", dev->name);
582 }
583 else if((LMC_CSR_READ(sc, csr_gp) & LMC_GEP_DP) == 0){
584 printk(KERN_WARNING "%s: Reprogramming FAILED. Needs to be reprogrammed. (done)\n", dev->name);
585 }
586 else {
587 printk(KERN_DEBUG "%s: Done reprogramming Xilinx, %d bits, good luck!\n", dev->name, pos);
588 }
589
590 lmc_gpio_mkinput(sc, 0xff);
591
592 sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
593 lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
594
595 sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
596 lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
597
598 kfree(data);
599
600 ret = 0;
601
602 break;
603 }
604 default: /*fold02*/
605 ret = -EBADE;
606 break;
607 }
608
609 netif_wake_queue(dev);
610 sc->lmc_txfull = 0;
611
612 }
613 break;
614 default: /*fold01*/
615 /* If we don't know what to do, give the protocol a shot. */
616 ret = lmc_proto_ioctl (sc, ifr, cmd);
617 break;
618 }
619
620 spin_unlock_irqrestore(&sc->lmc_lock, flags); /*fold01*/
621
622 lmc_trace(dev, "lmc_ioctl out");
623
624 return ret;
625}
626
627
628/* the watchdog process that cruises around */
629static void lmc_watchdog (unsigned long data) /*fold00*/
630{
631 struct net_device *dev = (struct net_device *) data;
632 lmc_softc_t *sc;
633 int link_status;
634 u_int32_t ticks;
635 unsigned long flags;
636
637 sc = dev->priv;
638
639 lmc_trace(dev, "lmc_watchdog in");
640
641 spin_lock_irqsave(&sc->lmc_lock, flags);
642
643 if(sc->check != 0xBEAFCAFE){
644        printk("LMC: Corrupt net_device struct, breaking out\n");
645 spin_unlock_irqrestore(&sc->lmc_lock, flags);
646 return;
647 }
648
649
650 /* Make sure the tx jabber and rx watchdog are off,
651 * and the transmit and receive processes are running.
652 */
653
654 LMC_CSR_WRITE (sc, csr_15, 0x00000011);
655 sc->lmc_cmdmode |= TULIP_CMD_TXRUN | TULIP_CMD_RXRUN;
656 LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
657
658 if (sc->lmc_ok == 0)
659 goto kick_timer;
660
661 LMC_EVENT_LOG(LMC_EVENT_WATCHDOG, LMC_CSR_READ (sc, csr_status), lmc_mii_readreg (sc, 0, 16));
662
663 /* --- begin time out check -----------------------------------
664 * check for a transmit interrupt timeout
665 * Has the packet xmt vs xmt serviced threshold been exceeded */
666 if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
667 sc->stats.tx_packets > sc->lasttx_packets &&
668 sc->tx_TimeoutInd == 0)
669 {
670
671 /* wait for the watchdog to come around again */
672 sc->tx_TimeoutInd = 1;
673 }
674 else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx &&
675 sc->stats.tx_packets > sc->lasttx_packets &&
676 sc->tx_TimeoutInd)
677 {
678
679 LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0);
680
681 sc->tx_TimeoutDisplay = 1;
682 sc->stats.tx_TimeoutCnt++;
683
684 /* DEC chip is stuck, hit it with a RESET!!!! */
685 lmc_running_reset (dev);
686
687
688 /* look at receive & transmit process state to make sure they are running */
689 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
690
691 /* look at: DSR - 02 for Reg 16
692 * CTS - 08
693 * DCD - 10
694 * RI - 20
695 * for Reg 17
696 */
697 LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg (sc, 0, 16), lmc_mii_readreg (sc, 0, 17));
698
699 /* reset the transmit timeout detection flag */
700 sc->tx_TimeoutInd = 0;
701 sc->lastlmc_taint_tx = sc->lmc_taint_tx;
702 sc->lasttx_packets = sc->stats.tx_packets;
703 }
704 else
705 {
706 sc->tx_TimeoutInd = 0;
707 sc->lastlmc_taint_tx = sc->lmc_taint_tx;
708 sc->lasttx_packets = sc->stats.tx_packets;
709 }
710
711 /* --- end time out check ----------------------------------- */
712
713
714 link_status = sc->lmc_media->get_link_status (sc);
715
716 /*
717 * hardware level link lost, but the interface is marked as up.
718 * Mark it as down.
719 */
720 if ((link_status == 0) && (sc->last_link_status != 0)) {
721 printk(KERN_WARNING "%s: hardware/physical link down\n", dev->name);
722 sc->last_link_status = 0;
723 /* lmc_reset (sc); Why reset??? The link can go down ok */
724
725 /* Inform the world that link has been lost */
726 dev->flags &= ~IFF_RUNNING;
727 }
728
729 /*
730 * hardware link is up, but the interface is marked as down.
731 * Bring it back up again.
732 */
733 if (link_status != 0 && sc->last_link_status == 0) {
734 printk(KERN_WARNING "%s: hardware/physical link up\n", dev->name);
735 sc->last_link_status = 1;
736 /* lmc_reset (sc); Again why reset??? */
737
738 /* Inform the world that link protocol is back up. */
739 dev->flags |= IFF_RUNNING;
740
741 /* Now we have to tell the syncppp that we had an outage
742 * and that it should deal. Calling sppp_reopen here
743 * should do the trick, but we may have to call sppp_close
744 * when the link goes down, and call sppp_open here.
745 * Subject to more testing.
746 * --bbraun
747 */
748
749 lmc_proto_reopen(sc);
750
751 }
752
753 /* Call media specific watchdog functions */
754 sc->lmc_media->watchdog(sc);
755
756 /*
757 * Poke the transmitter to make sure it
758 * never stops, even if we run out of mem
759 */
760 LMC_CSR_WRITE(sc, csr_rxpoll, 0);
761
762 /*
763 * Check for code that failed
764 * and try and fix it as appropriate
765 */
766 if(sc->failed_ring == 1){
767 /*
768         * Failed to set up the recv/xmit ring
769 * Try again
770 */
771 sc->failed_ring = 0;
772 lmc_softreset(sc);
773 }
774 if(sc->failed_recv_alloc == 1){
775 /*
776 * We failed to alloc mem in the
777 * interrupt handler, go through the rings
778 * and rebuild them
779 */
780 sc->failed_recv_alloc = 0;
781 lmc_softreset(sc);
782 }
783
784
785 /*
786 * remember the timer value
787 */
788kick_timer:
789
790 ticks = LMC_CSR_READ (sc, csr_gp_timer);
791 LMC_CSR_WRITE (sc, csr_gp_timer, 0xffffffffUL);
792 sc->ictl.ticks = 0x0000ffff - (ticks & 0x0000ffff);
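    /*
     * The general-purpose timer counts down from the value written above
     * (it is re-armed to 0xffffffff on every pass), so 0xffff minus the
     * low 16 bits read back is, presumably, the number of timer ticks
     * that elapsed since the previous watchdog run; it is kept in
     * ictl.ticks for later inspection.
     */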
793
794 /*
795 * restart this timer.
796 */
797 sc->timer.expires = jiffies + (HZ);
798 add_timer (&sc->timer);
799
800 spin_unlock_irqrestore(&sc->lmc_lock, flags);
801
802 lmc_trace(dev, "lmc_watchdog out");
803
804}
805
806static void lmc_setup(struct net_device * const dev) /*fold00*/
807{
808 lmc_trace(dev, "lmc_setup in");
809
810 dev->type = ARPHRD_HDLC;
811 dev->hard_start_xmit = lmc_start_xmit;
812 dev->open = lmc_open;
813 dev->stop = lmc_close;
814 dev->get_stats = lmc_get_stats;
815 dev->do_ioctl = lmc_ioctl;
816 dev->tx_timeout = lmc_driver_timeout;
817 dev->watchdog_timeo = (HZ); /* 1 second */
818
819 lmc_trace(dev, "lmc_setup out");
820}
821
822
823static int __devinit lmc_init_one(struct pci_dev *pdev,
824 const struct pci_device_id *ent)
825{
826 struct net_device *dev;
827 lmc_softc_t *sc;
828 u16 subdevice;
829 u_int16_t AdapModelNum;
830 int err = -ENOMEM;
831 static int cards_found;
832#ifndef GCOM
833 /* We name by type not by vendor */
834 static const char lmcname[] = "hdlc%d";
835#else
836 /*
837 * GCOM uses LMC vendor name so that clients can know which card
838 * to attach to.
839 */
840 static const char lmcname[] = "lmc%d";
841#endif
842
843
844 /*
845 * Allocate our own device structure
846 */
847 dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup);
848 if (!dev) {
849 printk (KERN_ERR "lmc:alloc_netdev for device failed\n");
850 goto out1;
851 }
852
853 lmc_trace(dev, "lmc_init_one in");
854
855 err = pci_enable_device(pdev);
856 if (err) {
857 printk(KERN_ERR "lmc: pci enable failed:%d\n", err);
858 goto out2;
859 }
860
861 if (pci_request_regions(pdev, "lmc")) {
862 printk(KERN_ERR "lmc: pci_request_region failed\n");
863 err = -EIO;
864 goto out3;
865 }
866
867 pci_set_drvdata(pdev, dev);
868
869 if(lmc_first_load == 0){
870 printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n",
871 DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION);
872 lmc_first_load = 1;
873 }
874
875 sc = dev->priv;
876 sc->lmc_device = dev;
877 sc->name = dev->name;
878
879 /* Initialize the sppp layer */
880 /* An ioctl can cause a subsequent detach for raw frame interface */
881 sc->if_type = LMC_PPP;
882 sc->check = 0xBEAFCAFE;
883 dev->base_addr = pci_resource_start(pdev, 0);
884 dev->irq = pdev->irq;
885
886 SET_MODULE_OWNER(dev);
887 SET_NETDEV_DEV(dev, &pdev->dev);
888
889 /*
890 * This will get the protocol layer ready and do any 1 time init's
891 * Must have a valid sc and dev structure
892 */
893 lmc_proto_init(sc);
894
895 lmc_proto_attach(sc);
896
897 /*
898 * Why were we changing this???
899 dev->tx_queue_len = 100;
900 */
901
902    /* Init the spin lock so we can use it later */
903
904 spin_lock_init(&sc->lmc_lock);
905 pci_set_master(pdev);
906
907 printk ("%s: detected at %lx, irq %d\n", dev->name,
908 dev->base_addr, dev->irq);
909
910 if (register_netdev (dev) != 0) {
911 printk (KERN_ERR "%s: register_netdev failed.\n", dev->name);
912 goto out4;
913 }
914
915 sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN;
916 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
917
918 /*
919 *
920     * Check either the subvendor or the subdevice; some systems reverse
921     * the setting in the BIOS (this seems to be version and arch dependent).
922     * Fix the error by exchanging the two values.
923 */
924 if ((subdevice = pdev->subsystem_device) == PCI_VENDOR_ID_LMC)
925 subdevice = pdev->subsystem_vendor;
926
927 switch (subdevice) {
928 case PCI_DEVICE_ID_LMC_HSSI:
929 printk ("%s: LMC HSSI\n", dev->name);
930 sc->lmc_cardtype = LMC_CARDTYPE_HSSI;
931 sc->lmc_media = &lmc_hssi_media;
932 break;
933 case PCI_DEVICE_ID_LMC_DS3:
934 printk ("%s: LMC DS3\n", dev->name);
935 sc->lmc_cardtype = LMC_CARDTYPE_DS3;
936 sc->lmc_media = &lmc_ds3_media;
937 break;
938 case PCI_DEVICE_ID_LMC_SSI:
939 printk ("%s: LMC SSI\n", dev->name);
940 sc->lmc_cardtype = LMC_CARDTYPE_SSI;
941 sc->lmc_media = &lmc_ssi_media;
942 break;
943 case PCI_DEVICE_ID_LMC_T1:
944 printk ("%s: LMC T1\n", dev->name);
945 sc->lmc_cardtype = LMC_CARDTYPE_T1;
946 sc->lmc_media = &lmc_t1_media;
947 break;
948 default:
949        printk (KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
950 break;
951 }
952
953 lmc_initcsrs (sc, dev->base_addr, 8);
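    /*
     * The stride of 8 matches the DEC 21140's CSR layout, where the
     * control/status registers are spaced 8 bytes apart starting at the
     * I/O base address (see lmc_initcsrs() below, which derives each
     * csr_* address as base + n * stride).
     */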
954
955 lmc_gpio_mkinput (sc, 0xff);
956 sc->lmc_gpio = 0; /* drive no signals yet */
957
958 sc->lmc_media->defaults (sc);
959
960 sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
961
962 /* verify that the PCI Sub System ID matches the Adapter Model number
963 * from the MII register
964 */
965 AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4;
966
967 if ((AdapModelNum == LMC_ADAP_T1
968 && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */
969 (AdapModelNum == LMC_ADAP_SSI
970 && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */
971 (AdapModelNum == LMC_ADAP_DS3
972 && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */
973 (AdapModelNum == LMC_ADAP_HSSI
974 && subdevice == PCI_DEVICE_ID_LMC_HSSI))
975 { /* detect LMC5200 */
976
977 }
978 else {
979 printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n",
980 dev->name, AdapModelNum, subdevice);
981// return (NULL);
982 }
983 /*
984 * reset clock
985 */
986 LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL);
987
988 sc->board_idx = cards_found++;
989 sc->stats.check = STATCHECK;
990 sc->stats.version_size = (DRIVER_VERSION << 16) +
991 sizeof (struct lmc_statistics);
992 sc->stats.lmc_cardtype = sc->lmc_cardtype;
993
994 sc->lmc_ok = 0;
995 sc->last_link_status = 0;
996
997 lmc_trace(dev, "lmc_init_one out");
998 return 0;
999
1000 out4:
1001 lmc_proto_detach(sc);
1002 out3:
1003 if (pdev) {
1004 pci_release_regions(pdev);
1005 pci_set_drvdata(pdev, NULL);
1006 }
1007 out2:
1008 free_netdev(dev);
1009 out1:
1010 return err;
1011}
1012
1013/*
1014 * Called from pci when removing module.
1015 */
1016static void __devexit lmc_remove_one (struct pci_dev *pdev)
1017{
1018 struct net_device *dev = pci_get_drvdata(pdev);
1019
1020 if (dev) {
1021 lmc_softc_t *sc = dev->priv;
1022
1023 printk("%s: removing...\n", dev->name);
1024 lmc_proto_detach(sc);
1025 unregister_netdev(dev);
1026 free_netdev(dev);
1027 pci_release_regions(pdev);
1028 pci_disable_device(pdev);
1029 pci_set_drvdata(pdev, NULL);
1030 }
1031}
1032
1033/* After this is called, packets can be sent.
1034 * Does not initialize the addresses
1035 */
1036static int lmc_open (struct net_device *dev) /*fold00*/
1037{
1038 lmc_softc_t *sc = dev->priv;
1039
1040 lmc_trace(dev, "lmc_open in");
1041
1042 lmc_led_on(sc, LMC_DS3_LED0);
1043
1044 lmc_dec_reset (sc);
1045 lmc_reset (sc);
1046
1047 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1048 LMC_EVENT_LOG(LMC_EVENT_RESET2,
1049 lmc_mii_readreg (sc, 0, 16),
1050 lmc_mii_readreg (sc, 0, 17));
1051
1052
1053 if (sc->lmc_ok){
1054 lmc_trace(dev, "lmc_open lmc_ok out");
1055 return (0);
1056 }
1057
1058 lmc_softreset (sc);
1059
1060 /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
1061 if (request_irq (dev->irq, &lmc_interrupt, SA_SHIRQ, dev->name, dev)){
1062 printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
1063 lmc_trace(dev, "lmc_open irq failed out");
1064 return -EAGAIN;
1065 }
1066 sc->got_irq = 1;
1067
1068 /* Assert Terminal Active */
1069 sc->lmc_miireg16 |= LMC_MII16_LED_ALL;
1070 sc->lmc_media->set_link_status (sc, LMC_LINK_UP);
1071
1072 /*
1073 * reset to last state.
1074 */
1075 sc->lmc_media->set_status (sc, NULL);
1076
1077 /* setup default bits to be used in tulip_desc_t transmit descriptor
1078 * -baz */
1079 sc->TxDescriptControlInit = (
1080 LMC_TDES_INTERRUPT_ON_COMPLETION
1081 | LMC_TDES_FIRST_SEGMENT
1082 | LMC_TDES_LAST_SEGMENT
1083 | LMC_TDES_SECOND_ADDR_CHAINED
1084 | LMC_TDES_DISABLE_PADDING
1085 );
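    /*
     * These LMC_TDES_* flags map onto the Tulip transmit descriptor's
     * control word (TDES1); lmc_start_xmit() ORs TxDescriptControlInit
     * into every descriptor it hands to the chip, so the choices made
     * here (chained descriptors, no padding, interrupt on completion)
     * apply to every outgoing frame.
     */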
1086
1087 if (sc->ictl.crc_length == LMC_CTL_CRC_LENGTH_16) {
1088 /* disable 32 bit CRC generated by ASIC */
1089 sc->TxDescriptControlInit |= LMC_TDES_ADD_CRC_DISABLE;
1090 }
1091 sc->lmc_media->set_crc_length(sc, sc->ictl.crc_length);
1092    /* Acknowledge the Terminal Active and light LEDs */
1093
1094 /* dev->flags |= IFF_UP; */
1095
1096 lmc_proto_open(sc);
1097
1098 dev->do_ioctl = lmc_ioctl;
1099
1100
1101 netif_start_queue(dev);
1102
1103 sc->stats.tx_tbusy0++ ;
1104
1105 /*
1106 * select what interrupts we want to get
1107 */
1108 sc->lmc_intrmask = 0;
1109 /* Should be using the default interrupt mask defined in the .h file. */
1110 sc->lmc_intrmask |= (TULIP_STS_NORMALINTR
1111 | TULIP_STS_RXINTR
1112 | TULIP_STS_TXINTR
1113 | TULIP_STS_ABNRMLINTR
1114 | TULIP_STS_SYSERROR
1115 | TULIP_STS_TXSTOPPED
1116 | TULIP_STS_TXUNDERFLOW
1117 | TULIP_STS_RXSTOPPED
1118 | TULIP_STS_RXNOBUF
1119 );
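    /*
     * The mask assembled above is written to the interrupt enable
     * register (csr_intr) just below; it mirrors the TULIP_STS_* bits
     * that lmc_interrupt() tests in csr_status, so only events the
     * handler actually deals with can raise an interrupt.
     */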
1120 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1121
1122 sc->lmc_cmdmode |= TULIP_CMD_TXRUN;
1123 sc->lmc_cmdmode |= TULIP_CMD_RXRUN;
1124 LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1125
1126 sc->lmc_ok = 1; /* Run watchdog */
1127
1128 /*
1129 * Set the if up now - pfb
1130 */
1131
1132 sc->last_link_status = 1;
1133
1134 /*
1135 * Setup a timer for the watchdog on probe, and start it running.
1136 * Since lmc_ok == 0, it will be a NOP for now.
1137 */
1138 init_timer (&sc->timer);
1139 sc->timer.expires = jiffies + HZ;
1140 sc->timer.data = (unsigned long) dev;
1141 sc->timer.function = &lmc_watchdog;
1142 add_timer (&sc->timer);
1143
1144 lmc_trace(dev, "lmc_open out");
1145
1146 return (0);
1147}
1148
1149/* Total reset to compensate for the AdTran DSU doing bad things
1150 * under heavy load
1151 */
1152
1153static void lmc_running_reset (struct net_device *dev) /*fold00*/
1154{
1155
1156 lmc_softc_t *sc = (lmc_softc_t *) dev->priv;
1157
1158    lmc_trace(dev, "lmc_running_reset in");
1159
1160 /* stop interrupts */
1161 /* Clear the interrupt mask */
1162 LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1163
1164 lmc_dec_reset (sc);
1165 lmc_reset (sc);
1166 lmc_softreset (sc);
1167 /* sc->lmc_miireg16 |= LMC_MII16_LED_ALL; */
1168 sc->lmc_media->set_link_status (sc, 1);
1169 sc->lmc_media->set_status (sc, NULL);
1170
1171 //dev->flags |= IFF_RUNNING;
1172
1173 netif_wake_queue(dev);
1174
1175 sc->lmc_txfull = 0;
1176 sc->stats.tx_tbusy0++ ;
1177
1178 sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK;
1179 LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask);
1180
1181 sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
1182 LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1183
1184    lmc_trace(dev, "lmc_running_reset out");
1185}
1186
1187
1188/* This is what is called when you ifconfig down a device.
1189 * This disables the timer for the watchdog and keepalives,
1190 * and disables the irq for dev.
1191 */
1192static int lmc_close (struct net_device *dev) /*fold00*/
1193{
1194 /* not calling release_region() as we should */
1195 lmc_softc_t *sc;
1196
1197 lmc_trace(dev, "lmc_close in");
1198
1199 sc = dev->priv;
1200 sc->lmc_ok = 0;
1201 sc->lmc_media->set_link_status (sc, 0);
1202 del_timer (&sc->timer);
1203 lmc_proto_close(sc);
1204 lmc_ifdown (dev);
1205
1206 lmc_trace(dev, "lmc_close out");
1207
1208 return 0;
1209}
1210
1211/* Ends the transfer of packets */
1212/* When the interface goes down, this is called */
1213static int lmc_ifdown (struct net_device *dev) /*fold00*/
1214{
1215 lmc_softc_t *sc = dev->priv;
1216 u32 csr6;
1217 int i;
1218
1219 lmc_trace(dev, "lmc_ifdown in");
1220
1221 /* Don't let anything else go on right now */
1222 // dev->start = 0;
1223 netif_stop_queue(dev);
1224 sc->stats.tx_tbusy1++ ;
1225
1226 /* stop interrupts */
1227 /* Clear the interrupt mask */
1228 LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
1229
1230 /* Stop Tx and Rx on the chip */
1231 csr6 = LMC_CSR_READ (sc, csr_command);
1232 csr6 &= ~LMC_DEC_ST; /* Turn off the Transmission bit */
1233 csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */
1234 LMC_CSR_WRITE (sc, csr_command, csr6);
1235
1236 dev->flags &= ~IFF_RUNNING;
1237
1238 sc->stats.rx_missed_errors +=
1239 LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
1240
1241 /* release the interrupt */
1242 if(sc->got_irq == 1){
1243 free_irq (dev->irq, dev);
1244 sc->got_irq = 0;
1245 }
1246
1247 /* free skbuffs in the Rx queue */
1248 for (i = 0; i < LMC_RXDESCS; i++)
1249 {
1250 struct sk_buff *skb = sc->lmc_rxq[i];
1251 sc->lmc_rxq[i] = NULL;
1252 sc->lmc_rxring[i].status = 0;
1253 sc->lmc_rxring[i].length = 0;
1254 sc->lmc_rxring[i].buffer1 = 0xDEADBEEF;
1255 if (skb != NULL)
1256 dev_kfree_skb(skb);
1257 sc->lmc_rxq[i] = NULL;
1258 }
1259
1260 for (i = 0; i < LMC_TXDESCS; i++)
1261 {
1262 if (sc->lmc_txq[i] != NULL)
1263 dev_kfree_skb(sc->lmc_txq[i]);
1264 sc->lmc_txq[i] = NULL;
1265 }
1266
1267 lmc_led_off (sc, LMC_MII16_LED_ALL);
1268
1269 netif_wake_queue(dev);
1270 sc->stats.tx_tbusy0++ ;
1271
1272 lmc_trace(dev, "lmc_ifdown out");
1273
1274 return 0;
1275}
1276
1277/* Interrupt handling routine. This will take an incoming packet, or clean
1278 * up after a transmit.
1279 */
1280static irqreturn_t lmc_interrupt (int irq, void *dev_instance, struct pt_regs *regs) /*fold00*/
1281{
1282 struct net_device *dev = (struct net_device *) dev_instance;
1283 lmc_softc_t *sc;
1284 u32 csr;
1285 int i;
1286 s32 stat;
1287 unsigned int badtx;
1288 u32 firstcsr;
1289 int max_work = LMC_RXDESCS;
1290 int handled = 0;
1291
1292 lmc_trace(dev, "lmc_interrupt in");
1293
1294 sc = dev->priv;
1295
1296 spin_lock(&sc->lmc_lock);
1297
1298 /*
1299 * Read the csr to find what interrupts we have (if any)
1300 */
1301 csr = LMC_CSR_READ (sc, csr_status);
1302
1303 /*
1304 * Make sure this is our interrupt
1305 */
1306 if ( ! (csr & sc->lmc_intrmask)) {
1307 goto lmc_int_fail_out;
1308 }
1309
1310 firstcsr = csr;
1311
1312 /* always go through this loop at least once */
1313 while (csr & sc->lmc_intrmask) {
1314 handled = 1;
1315
1316 /*
1317 * Clear interrupt bits, we handle all case below
1318 */
1319 LMC_CSR_WRITE (sc, csr_status, csr);
1320
1321 /*
1322 * One of
1323 * - Transmit process timed out CSR5<1>
1324 * - Transmit jabber timeout CSR5<3>
1325 * - Transmit underflow CSR5<5>
1326 * - Transmit Receiver buffer unavailable CSR5<7>
1327 * - Receive process stopped CSR5<8>
1328 * - Receive watchdog timeout CSR5<9>
1329 * - Early transmit interrupt CSR5<10>
1330 *
1331 * Is this really right? Should we do a running reset for jabber?
1332 * (being a WAN card and all)
1333 */
1334 if (csr & TULIP_STS_ABNRMLINTR){
1335 lmc_running_reset (dev);
1336 break;
1337 }
1338
1339 if (csr & TULIP_STS_RXINTR){
1340 lmc_trace(dev, "rx interrupt");
1341 lmc_rx (dev);
1342
1343 }
1344 if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
1345
1346 int n_compl = 0 ;
1347 /* reset the transmit timeout detection flag -baz */
1348 sc->stats.tx_NoCompleteCnt = 0;
1349
1350 badtx = sc->lmc_taint_tx;
1351 i = badtx % LMC_TXDESCS;
1352
1353 while ((badtx < sc->lmc_next_tx)) {
1354 stat = sc->lmc_txring[i].status;
1355
1356 LMC_EVENT_LOG (LMC_EVENT_XMTINT, stat,
1357 sc->lmc_txring[i].length);
1358 /*
1359                 * If bit 31 is 1 the Tulip still owns it; break out of the loop
1360 */
1361 if (stat & 0x80000000)
1362 break;
1363
1364 n_compl++ ; /* i.e., have an empty slot in ring */
1365 /*
1366 * If we have no skbuff or have cleared it
1367 * Already continue to the next buffer
1368 */
1369 if (sc->lmc_txq[i] == NULL)
1370 continue;
1371
1372 /*
1373 * Check the total error summary to look for any errors
1374 */
1375 if (stat & 0x8000) {
1376 sc->stats.tx_errors++;
1377 if (stat & 0x4104)
1378 sc->stats.tx_aborted_errors++;
1379 if (stat & 0x0C00)
1380 sc->stats.tx_carrier_errors++;
1381 if (stat & 0x0200)
1382 sc->stats.tx_window_errors++;
1383 if (stat & 0x0002)
1384 sc->stats.tx_fifo_errors++;
1385 }
1386 else {
1387
1388 sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff;
1389
1390 sc->stats.tx_packets++;
1391 }
1392
1393 // dev_kfree_skb(sc->lmc_txq[i]);
1394 dev_kfree_skb_irq(sc->lmc_txq[i]);
1395 sc->lmc_txq[i] = NULL;
1396
1397 badtx++;
1398 i = badtx % LMC_TXDESCS;
1399 }
1400
1401 if (sc->lmc_next_tx - badtx > LMC_TXDESCS)
1402 {
1403 printk ("%s: out of sync pointer\n", dev->name);
1404 badtx += LMC_TXDESCS;
1405 }
1406 LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0);
1407 sc->lmc_txfull = 0;
1408 netif_wake_queue(dev);
1409 sc->stats.tx_tbusy0++ ;
1410
1411
1412#ifdef DEBUG
1413 sc->stats.dirtyTx = badtx;
1414 sc->stats.lmc_next_tx = sc->lmc_next_tx;
1415 sc->stats.lmc_txfull = sc->lmc_txfull;
1416#endif
1417 sc->lmc_taint_tx = badtx;
1418
1419 /*
1420 * Why was there a break here???
1421 */
1422 } /* end handle transmit interrupt */
1423
1424 if (csr & TULIP_STS_SYSERROR) {
1425 u32 error;
1426 printk (KERN_WARNING "%s: system bus error csr: %#8.8x\n", dev->name, csr);
1427 error = csr>>23 & 0x7;
1428 switch(error){
1429 case 0x000:
1430 printk(KERN_WARNING "%s: Parity Fault (bad)\n", dev->name);
1431 break;
1432 case 0x001:
1433 printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
1434 break;
1435            case 0x002:
1436 printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
1437 break;
1438 default:
1439 printk(KERN_WARNING "%s: This bus error code was supposed to be reserved!\n", dev->name);
1440 }
1441 lmc_dec_reset (sc);
1442 lmc_reset (sc);
1443 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
1444 LMC_EVENT_LOG(LMC_EVENT_RESET2,
1445 lmc_mii_readreg (sc, 0, 16),
1446 lmc_mii_readreg (sc, 0, 17));
1447
1448 }
1449
1450
1451 if(max_work-- <= 0)
1452 break;
1453
1454 /*
1455 * Get current csr status to make sure
1456 * we've cleared all interrupts
1457 */
1458 csr = LMC_CSR_READ (sc, csr_status);
1459 } /* end interrupt loop */
1460 LMC_EVENT_LOG(LMC_EVENT_INT, firstcsr, csr);
1461
1462lmc_int_fail_out:
1463
1464 spin_unlock(&sc->lmc_lock);
1465
1466 lmc_trace(dev, "lmc_interrupt out");
1467 return IRQ_RETVAL(handled);
1468}
1469
1470static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/
1471{
1472 lmc_softc_t *sc;
1473 u32 flag;
1474 int entry;
1475 int ret = 0;
1476 unsigned long flags;
1477
1478 lmc_trace(dev, "lmc_start_xmit in");
1479
1480 sc = dev->priv;
1481
1482 spin_lock_irqsave(&sc->lmc_lock, flags);
1483
1484 /* normal path, tbusy known to be zero */
1485
1486 entry = sc->lmc_next_tx % LMC_TXDESCS;
1487
1488 sc->lmc_txq[entry] = skb;
1489 sc->lmc_txring[entry].buffer1 = virt_to_bus (skb->data);
1490
1491 LMC_CONSOLE_LOG("xmit", skb->data, skb->len);
1492
1493#ifndef GCOM
1494 /* If the queue is less than half full, don't interrupt */
1495 if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS / 2)
1496 {
1497 /* Do not interrupt on completion of this packet */
1498 flag = 0x60000000;
1499 netif_wake_queue(dev);
1500 }
1501 else if (sc->lmc_next_tx - sc->lmc_taint_tx == LMC_TXDESCS / 2)
1502 {
1503 /* This generates an interrupt on completion of this packet */
1504 flag = 0xe0000000;
1505 netif_wake_queue(dev);
1506 }
1507 else if (sc->lmc_next_tx - sc->lmc_taint_tx < LMC_TXDESCS - 1)
1508 {
1509 /* Do not interrupt on completion of this packet */
1510 flag = 0x60000000;
1511 netif_wake_queue(dev);
1512 }
1513 else
1514 {
1515 /* This generates an interrupt on completion of this packet */
1516 flag = 0xe0000000;
1517 sc->lmc_txfull = 1;
1518 netif_stop_queue(dev);
1519 }
1520#else
1521 flag = LMC_TDES_INTERRUPT_ON_COMPLETION;
1522
1523 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1)
1524 { /* ring full, go busy */
1525 sc->lmc_txfull = 1;
1526 netif_stop_queue(dev);
1527 sc->stats.tx_tbusy1++ ;
1528 LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0);
1529 }
1530#endif
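    /*
     * Net effect of the flag selection above (non-GCOM build): a
     * transmit-complete interrupt is only requested when the ring is
     * half full or completely full. For lightly loaded rings the
     * 0x60000000 value (first + last segment, no interrupt) is used,
     * which keeps the interrupt rate down at the cost of completions
     * being reaped a little later.
     */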
1531
1532
1533 if (entry == LMC_TXDESCS - 1) /* last descriptor in ring */
1534 flag |= LMC_TDES_END_OF_RING; /* flag as such for Tulip */
1535
1536 /* don't pad small packets either */
1537 flag = sc->lmc_txring[entry].length = (skb->len) | flag |
1538 sc->TxDescriptControlInit;
1539
1540 /* set the transmit timeout flag to be checked in
1541 * the watchdog timer handler. -baz
1542 */
1543
1544 sc->stats.tx_NoCompleteCnt++;
1545 sc->lmc_next_tx++;
1546
1547 /* give ownership to the chip */
1548 LMC_EVENT_LOG(LMC_EVENT_XMT, flag, entry);
1549 sc->lmc_txring[entry].status = 0x80000000;
1550
1551 /* send now! */
1552 LMC_CSR_WRITE (sc, csr_txpoll, 0);
1553
1554 dev->trans_start = jiffies;
1555
1556 spin_unlock_irqrestore(&sc->lmc_lock, flags);
1557
1558 lmc_trace(dev, "lmc_start_xmit_out");
1559 return ret;
1560}
1561
1562
1563static int lmc_rx (struct net_device *dev) /*fold00*/
1564{
1565 lmc_softc_t *sc;
1566 int i;
1567 int rx_work_limit = LMC_RXDESCS;
1568 unsigned int next_rx;
1569 int rxIntLoopCnt; /* debug -baz */
1570 int localLengthErrCnt = 0;
1571 long stat;
1572 struct sk_buff *skb, *nsb;
1573 u16 len;
1574
1575 lmc_trace(dev, "lmc_rx in");
1576
1577 sc = dev->priv;
1578
1579 lmc_led_on(sc, LMC_DS3_LED3);
1580
1581 rxIntLoopCnt = 0; /* debug -baz */
1582
1583 i = sc->lmc_next_rx % LMC_RXDESCS;
1584 next_rx = sc->lmc_next_rx;
1585
1586 while (((stat = sc->lmc_rxring[i].status) & LMC_RDES_OWN_BIT) != DESC_OWNED_BY_DC21X4)
1587 {
1588 rxIntLoopCnt++; /* debug -baz */
1589 len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER);
1590 if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */
1591 if ((stat & 0x0000ffff) != 0x7fff) {
1592 /* Oversized frame */
1593 sc->stats.rx_length_errors++;
1594 goto skip_packet;
1595 }
1596 }
1597
1598 if(stat & 0x00000008){ /* Catch a dribbling bit error */
1599 sc->stats.rx_errors++;
1600 sc->stats.rx_frame_errors++;
1601 goto skip_packet;
1602 }
1603
1604
1605 if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */
1606 sc->stats.rx_errors++;
1607 sc->stats.rx_crc_errors++;
1608 goto skip_packet;
1609 }
1610
1611
1612 if (len > LMC_PKT_BUF_SZ){
1613 sc->stats.rx_length_errors++;
1614 localLengthErrCnt++;
1615 goto skip_packet;
1616 }
1617
1618 if (len < sc->lmc_crcSize + 2) {
1619 sc->stats.rx_length_errors++;
1620 sc->stats.rx_SmallPktCnt++;
1621 localLengthErrCnt++;
1622 goto skip_packet;
1623 }
1624
1625 if(stat & 0x00004000){
1626 printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name);
1627 }
1628
1629 len -= sc->lmc_crcSize;
1630
1631 skb = sc->lmc_rxq[i];
1632
1633 /*
1634 * We ran out of memory at some point
1635 * just allocate an skb buff and continue.
1636 */
1637
1638 if(skb == 0x0){
1639 nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1640 if (nsb) {
1641 sc->lmc_rxq[i] = nsb;
1642 nsb->dev = dev;
1643 sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
1644 }
1645 sc->failed_recv_alloc = 1;
1646 goto skip_packet;
1647 }
1648
1649 dev->last_rx = jiffies;
1650 sc->stats.rx_packets++;
1651 sc->stats.rx_bytes += len;
1652
1653 LMC_CONSOLE_LOG("recv", skb->data, len);
1654
1655 /*
1656 * I'm not sure of the sanity of this
1657 * Packets could be arriving at a constant
1658 * 44.210mbits/sec and we're going to copy
1659 * them into a new buffer??
1660 */
1661
1662 if(len > (LMC_MTU - (LMC_MTU>>2))){ /* len > LMC_MTU * 0.75 */
1663 /*
1664 * If it's a large packet don't copy it just hand it up
1665 */
1666 give_it_anyways:
1667
1668 sc->lmc_rxq[i] = NULL;
1669 sc->lmc_rxring[i].buffer1 = 0x0;
1670
1671 skb_put (skb, len);
1672 skb->protocol = lmc_proto_type(sc, skb);
1673 skb->protocol = htons(ETH_P_WAN_PPP);
1674 skb->mac.raw = skb->data;
1675// skb->nh.raw = skb->data;
1676 skb->dev = dev;
1677 lmc_proto_netif(sc, skb);
1678
1679 /*
1680 * This skb will be destroyed by the upper layers, make a new one
1681 */
1682 nsb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1683 if (nsb) {
1684 sc->lmc_rxq[i] = nsb;
1685 nsb->dev = dev;
1686 sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail);
1687 /* Transferred to 21140 below */
1688 }
1689 else {
1690 /*
1691 * We've run out of memory, stop trying to allocate
1692 * memory and exit the interrupt handler
1693 *
1694 * The chip may run out of receivers and stop
1695             * in which case we'll try to allocate the buffer
1696 * again. (once a second)
1697 */
1698 sc->stats.rx_BuffAllocErr++;
1699 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1700 sc->failed_recv_alloc = 1;
1701 goto skip_out_of_mem;
1702 }
1703 }
1704 else {
1705 nsb = dev_alloc_skb(len);
1706 if(!nsb) {
1707 goto give_it_anyways;
1708 }
1709 memcpy(skb_put(nsb, len), skb->data, len);
1710
1711 nsb->protocol = lmc_proto_type(sc, skb);
1712 nsb->mac.raw = nsb->data;
1713// nsb->nh.raw = nsb->data;
1714 nsb->dev = dev;
1715 lmc_proto_netif(sc, nsb);
1716 }
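        /*
         * Copy-break style policy: small frames are copied into a
         * right-sized skb (the else branch above) so the full-size
         * receive buffer can stay on the ring, while large frames are
         * handed up directly and the ring slot is refilled with a
         * freshly allocated buffer.
         */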
1717
1718 skip_packet:
1719 LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len);
1720 sc->lmc_rxring[i].status = DESC_OWNED_BY_DC21X4;
1721
1722 sc->lmc_next_rx++;
1723 i = sc->lmc_next_rx % LMC_RXDESCS;
1724 rx_work_limit--;
1725 if (rx_work_limit < 0)
1726 break;
1727 }
1728
1729 /* detect condition for LMC1000 where DSU cable attaches and fills
1730 * descriptors with bogus packets
1731 *
1732 if (localLengthErrCnt > LMC_RXDESCS - 3) {
1733 sc->stats.rx_BadPktSurgeCnt++;
1734 LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE,
1735 localLengthErrCnt,
1736 sc->stats.rx_BadPktSurgeCnt);
1737 } */
1738
1739 /* save max count of receive descriptors serviced */
1740 if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) {
1741 sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */
1742 }
1743
1744#ifdef DEBUG
1745 if (rxIntLoopCnt == 0)
1746 {
1747 for (i = 0; i < LMC_RXDESCS; i++)
1748 {
1749 if ((sc->lmc_rxring[i].status & LMC_RDES_OWN_BIT)
1750 != DESC_OWNED_BY_DC21X4)
1751 {
1752 rxIntLoopCnt++;
1753 }
1754 }
1755 LMC_EVENT_LOG(LMC_EVENT_RCVEND, rxIntLoopCnt, 0);
1756 }
1757#endif
1758
1759
1760 lmc_led_off(sc, LMC_DS3_LED3);
1761
1762skip_out_of_mem:
1763
1764 lmc_trace(dev, "lmc_rx out");
1765
1766 return 0;
1767}
1768
1769static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/
1770{
1771 lmc_softc_t *sc = dev->priv;
1772 unsigned long flags;
1773
1774 lmc_trace(dev, "lmc_get_stats in");
1775
1776
1777 spin_lock_irqsave(&sc->lmc_lock, flags);
1778
1779 sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff;
1780
1781 spin_unlock_irqrestore(&sc->lmc_lock, flags);
1782
1783 lmc_trace(dev, "lmc_get_stats out");
1784
1785 return (struct net_device_stats *) &sc->stats;
1786}
1787
1788static struct pci_driver lmc_driver = {
1789 .name = "lmc",
1790 .id_table = lmc_pci_tbl,
1791 .probe = lmc_init_one,
1792 .remove = __devexit_p(lmc_remove_one),
1793};
1794
1795static int __init init_lmc(void)
1796{
1797 return pci_module_init(&lmc_driver);
1798}
1799
1800static void __exit exit_lmc(void)
1801{
1802 pci_unregister_driver(&lmc_driver);
1803}
1804
1805module_init(init_lmc);
1806module_exit(exit_lmc);
1807
1808unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1809{
1810 int i;
1811 int command = (0xf6 << 10) | (devaddr << 5) | regno;
1812 int retval = 0;
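    /*
     * MII registers are read by bit-banging the 21140's serial port
     * (CSR9): in the loops below, 0x10000 appears to be the management
     * clock, 0x20000 the outgoing data bit, 0x40000 switches the port
     * into read mode and 0x80000 is the incoming data bit. The 16-bit
     * command assembled above carries the read opcode plus the PHY and
     * register addresses; 19 clocks then shift in the turnaround and the
     * 16 data bits, and the final ">> 1" discards the extra clocked bit.
     */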
1813
1814 lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
1815
1816 LMC_MII_SYNC (sc);
1817
1818 lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
1819
1820 for (i = 15; i >= 0; i--)
1821 {
1822 int dataval = (command & (1 << i)) ? 0x20000 : 0;
1823
1824 LMC_CSR_WRITE (sc, csr_9, dataval);
1825 lmc_delay ();
1826 /* __SLOW_DOWN_IO; */
1827 LMC_CSR_WRITE (sc, csr_9, dataval | 0x10000);
1828 lmc_delay ();
1829 /* __SLOW_DOWN_IO; */
1830 }
1831
1832 lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
1833
1834 for (i = 19; i > 0; i--)
1835 {
1836 LMC_CSR_WRITE (sc, csr_9, 0x40000);
1837 lmc_delay ();
1838 /* __SLOW_DOWN_IO; */
1839 retval = (retval << 1) | ((LMC_CSR_READ (sc, csr_9) & 0x80000) ? 1 : 0);
1840 LMC_CSR_WRITE (sc, csr_9, 0x40000 | 0x10000);
1841 lmc_delay ();
1842 /* __SLOW_DOWN_IO; */
1843 }
1844
1845 lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
1846
1847 return (retval >> 1) & 0xffff;
1848}
1849
1850void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data) /*fold00*/
1851{
1852 int i = 32;
1853 int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
1854
1855 lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
1856
1857 LMC_MII_SYNC (sc);
1858
1859 i = 31;
1860 while (i >= 0)
1861 {
1862 int datav;
1863
1864 if (command & (1 << i))
1865 datav = 0x20000;
1866 else
1867 datav = 0x00000;
1868
1869 LMC_CSR_WRITE (sc, csr_9, datav);
1870 lmc_delay ();
1871 /* __SLOW_DOWN_IO; */
1872 LMC_CSR_WRITE (sc, csr_9, (datav | 0x10000));
1873 lmc_delay ();
1874 /* __SLOW_DOWN_IO; */
1875 i--;
1876 }
1877
1878 i = 2;
1879 while (i > 0)
1880 {
1881 LMC_CSR_WRITE (sc, csr_9, 0x40000);
1882 lmc_delay ();
1883 /* __SLOW_DOWN_IO; */
1884 LMC_CSR_WRITE (sc, csr_9, 0x50000);
1885 lmc_delay ();
1886 /* __SLOW_DOWN_IO; */
1887 i--;
1888 }
1889
1890 lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
1891}
1892
1893static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
1894{
1895 int i;
1896
1897 lmc_trace(sc->lmc_device, "lmc_softreset in");
1898
1899 /* Initialize the receive rings and buffers. */
1900 sc->lmc_txfull = 0;
1901 sc->lmc_next_rx = 0;
1902 sc->lmc_next_tx = 0;
1903 sc->lmc_taint_rx = 0;
1904 sc->lmc_taint_tx = 0;
1905
1906 /*
1907 * Setup each one of the receiver buffers
1908 * allocate an skbuff for each one, setup the descriptor table
1909 * and point each buffer at the next one
1910 */
1911
1912 for (i = 0; i < LMC_RXDESCS; i++)
1913 {
1914 struct sk_buff *skb;
1915
1916 if (sc->lmc_rxq[i] == NULL)
1917 {
1918 skb = dev_alloc_skb (LMC_PKT_BUF_SZ + 2);
1919 if(skb == NULL){
1920 printk(KERN_WARNING "%s: Failed to allocate receiver ring, will try again\n", sc->name);
1921 sc->failed_ring = 1;
1922 break;
1923 }
1924 else{
1925 sc->lmc_rxq[i] = skb;
1926 }
1927 }
1928 else
1929 {
1930 skb = sc->lmc_rxq[i];
1931 }
1932
1933 skb->dev = sc->lmc_device;
1934
1935 /* owned by 21140 */
1936 sc->lmc_rxring[i].status = 0x80000000;
1937
1938 /* used to be PKT_BUF_SZ now uses skb since we lose some to head room */
1939 sc->lmc_rxring[i].length = skb->end - skb->data;
1940
1941        /* used to be tail, which is dumb since you're thinking why write
1942         * to the end of the packet, but since there's nothing there tail == data
1943 */
1944 sc->lmc_rxring[i].buffer1 = virt_to_bus (skb->data);
1945
1946 /* This is fair since the structure is static and we have the next address */
1947 sc->lmc_rxring[i].buffer2 = virt_to_bus (&sc->lmc_rxring[i + 1]);
1948
1949 }
1950
1951 /*
1952 * Sets end of ring
1953 */
1954 sc->lmc_rxring[i - 1].length |= 0x02000000; /* Set end of buffers flag */
1955 sc->lmc_rxring[i - 1].buffer2 = virt_to_bus (&sc->lmc_rxring[0]); /* Point back to the start */
1956 LMC_CSR_WRITE (sc, csr_rxlist, virt_to_bus (sc->lmc_rxring)); /* write base address */
1957
1958
1959 /* Initialize the transmit rings and buffers */
1960 for (i = 0; i < LMC_TXDESCS; i++)
1961 {
1962 if (sc->lmc_txq[i] != NULL){ /* have buffer */
1963 dev_kfree_skb(sc->lmc_txq[i]); /* free it */
1964 sc->stats.tx_dropped++; /* We just dropped a packet */
1965 }
1966 sc->lmc_txq[i] = NULL;
1967 sc->lmc_txring[i].status = 0x00000000;
1968 sc->lmc_txring[i].buffer2 = virt_to_bus (&sc->lmc_txring[i + 1]);
1969 }
1970 sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
1971 LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
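    /*
     * Ring topology: the transmit descriptors are explicitly chained
     * (buffer2 of each entry points at the next and the last wraps to
     * the first, with LMC_TDES_SECOND_ADDR_CHAINED set per descriptor
     * via TxDescriptControlInit), while the receive ring mainly relies
     * on the end-of-ring flag set on its final descriptor above. The
     * chip is told where each ring starts via csr_rxlist/csr_txlist.
     */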
1972
1973 lmc_trace(sc->lmc_device, "lmc_softreset out");
1974}
1975
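/*
 * GPIO direction helpers. Writing csr_gp with TULIP_GP_PINSET set appears
 * to program which general-purpose pins are driven as outputs (lmc_gpio_io
 * tracks the current direction mask); plain writes to csr_gp elsewhere in
 * the driver then set the actual output levels from sc->lmc_gpio.
 */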
1976void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
1977{
1978 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
1979 sc->lmc_gpio_io &= ~bits;
1980 LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1981 lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
1982}
1983
1984void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/
1985{
1986 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
1987 sc->lmc_gpio_io |= bits;
1988 LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
1989 lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
1990}
1991
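/*
 * LED helpers. The LED control bits in MII register 16 are evidently
 * active-low: lmc_led_on() clears the requested bit and lmc_led_off()
 * sets it, which also explains the inverted "already on" test below.
 */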
1992void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
1993{
1994 lmc_trace(sc->lmc_device, "lmc_led_on in");
1995 if((~sc->lmc_miireg16) & led){ /* Already on! */
1996 lmc_trace(sc->lmc_device, "lmc_led_on aon out");
1997 return;
1998 }
1999
2000 sc->lmc_miireg16 &= ~led;
2001 lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
2002 lmc_trace(sc->lmc_device, "lmc_led_on out");
2003}
2004
2005void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/
2006{
2007 lmc_trace(sc->lmc_device, "lmc_led_off in");
2008 if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
2009 lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
2010 return;
2011 }
2012
2013 sc->lmc_miireg16 |= led;
2014 lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
2015 lmc_trace(sc->lmc_device, "lmc_led_off out");
2016}
2017
2018static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
2019{
2020 lmc_trace(sc->lmc_device, "lmc_reset in");
2021 sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
2022 lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
2023
2024 sc->lmc_miireg16 &= ~LMC_MII16_FIFO_RESET;
2025 lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
2026
2027 /*
2028 * make some of the GPIO pins be outputs
2029 */
2030 lmc_gpio_mkoutput(sc, LMC_GEP_RESET);
2031
2032 /*
2033 * RESET low to force state reset. This also forces
2034 * the transmitter clock to be internal, but we expect to reset
2035 * that later anyway.
2036 */
2037 sc->lmc_gpio &= ~(LMC_GEP_RESET);
2038 LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio);
2039
2040 /*
2041 * hold for more than 10 microseconds
2042 */
2043 udelay(50);
2044
2045 /*
2046 * stop driving Xilinx-related signals
2047 */
2048 lmc_gpio_mkinput(sc, LMC_GEP_RESET);
2049
2050 /*
2051 * Call media specific init routine
2052 */
2053 sc->lmc_media->init(sc);
2054
2055 sc->stats.resetCount++;
2056 lmc_trace(sc->lmc_device, "lmc_reset out");
2057}
2058
2059static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
2060{
2061 u_int32_t val;
2062 lmc_trace(sc->lmc_device, "lmc_dec_reset in");
2063
2064 /*
2065 * disable all interrupts
2066 */
2067 sc->lmc_intrmask = 0;
2068 LMC_CSR_WRITE(sc, csr_intr, sc->lmc_intrmask);
2069
2070 /*
2071 * Reset the chip with a software reset command.
2072 * Wait 10 microseconds (actually 50 PCI cycles but at
2073 * 33MHz that comes to two microseconds but wait a
2074 * bit longer anyways)
2075 */
2076 LMC_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
2077 udelay(25);
2078#ifdef __sparc__
2079 sc->lmc_busmode = LMC_CSR_READ(sc, csr_busmode);
2080 sc->lmc_busmode = 0x00100000;
2081 sc->lmc_busmode &= ~TULIP_BUSMODE_SWRESET;
2082 LMC_CSR_WRITE(sc, csr_busmode, sc->lmc_busmode);
2083#endif
2084 sc->lmc_cmdmode = LMC_CSR_READ(sc, csr_command);
2085
2086 /*
2087 * We want:
2088 * no ethernet address in frames we write
2089 * disable padding (txdesc, padding disable)
2090 * ignore runt frames (rdes0 bit 15)
2091 * no receiver watchdog or transmitter jabber timer
2092 * (csr15 bit 0,14 == 1)
2093 * if using 16-bit CRC, turn off CRC (trans desc, crc disable)
2094 */
2095
2096 sc->lmc_cmdmode |= ( TULIP_CMD_PROMISCUOUS
2097 | TULIP_CMD_FULLDUPLEX
2098 | TULIP_CMD_PASSBADPKT
2099 | TULIP_CMD_NOHEARTBEAT
2100 | TULIP_CMD_PORTSELECT
2101 | TULIP_CMD_RECEIVEALL
2102 | TULIP_CMD_MUSTBEONE
2103 );
2104 sc->lmc_cmdmode &= ~( TULIP_CMD_OPERMODE
2105 | TULIP_CMD_THRESHOLDCTL
2106 | TULIP_CMD_STOREFWD
2107 | TULIP_CMD_TXTHRSHLDCTL
2108 );
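    /*
     * These map onto the 21140 operation-mode register (CSR6), written
     * to csr_command below. Promiscuous and receive-all are needed
     * because HDLC frames carry no Ethernet address for the chip to
     * filter on; the threshold and store-and-forward bits are cleared
     * so the chip uses its default transmit FIFO threshold, matching
     * the intent spelled out in the comment above.
     */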
2109
2110 LMC_CSR_WRITE(sc, csr_command, sc->lmc_cmdmode);
2111
2112 /*
2113 * disable receiver watchdog and transmit jabber
2114 */
2115 val = LMC_CSR_READ(sc, csr_sia_general);
2116 val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
2117 LMC_CSR_WRITE(sc, csr_sia_general, val);
2118
2119 lmc_trace(sc->lmc_device, "lmc_dec_reset out");
2120}
2121
2122static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
2123 size_t csr_size)
2124{
2125 lmc_trace(sc->lmc_device, "lmc_initcsrs in");
2126 sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size;
2127 sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size;
2128 sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size;
2129 sc->lmc_csrs.csr_rxlist = csr_base + 3 * csr_size;
2130 sc->lmc_csrs.csr_txlist = csr_base + 4 * csr_size;
2131 sc->lmc_csrs.csr_status = csr_base + 5 * csr_size;
2132 sc->lmc_csrs.csr_command = csr_base + 6 * csr_size;
2133 sc->lmc_csrs.csr_intr = csr_base + 7 * csr_size;
2134 sc->lmc_csrs.csr_missed_frames = csr_base + 8 * csr_size;
2135 sc->lmc_csrs.csr_9 = csr_base + 9 * csr_size;
2136 sc->lmc_csrs.csr_10 = csr_base + 10 * csr_size;
2137 sc->lmc_csrs.csr_11 = csr_base + 11 * csr_size;
2138 sc->lmc_csrs.csr_12 = csr_base + 12 * csr_size;
2139 sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size;
2140 sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size;
2141 sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size;
2142 lmc_trace(sc->lmc_device, "lmc_initcsrs out");
2143}
2144
2145static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/
2146 lmc_softc_t *sc;
2147 u32 csr6;
2148 unsigned long flags;
2149
2150 lmc_trace(dev, "lmc_driver_timeout in");
2151
2152 sc = dev->priv;
2153
2154 spin_lock_irqsave(&sc->lmc_lock, flags);
2155
2156    printk("%s: Xmitter busy!\n", dev->name);
2157
2158 sc->stats.tx_tbusy_calls++ ;
2159 if (jiffies - dev->trans_start < TX_TIMEOUT) {
2160 goto bug_out;
2161 }
2162
2163 /*
2164 * Chip seems to have locked up
2165 * Reset it
2166     * This wipes out all our descriptor
2167     * table and starts from scratch
2168 */
2169
2170 LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO,
2171 LMC_CSR_READ (sc, csr_status),
2172 sc->stats.tx_ProcTimeout);
2173
2174 lmc_running_reset (dev);
2175
2176 LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0);
2177 LMC_EVENT_LOG(LMC_EVENT_RESET2,
2178 lmc_mii_readreg (sc, 0, 16),
2179 lmc_mii_readreg (sc, 0, 17));
2180
2181 /* restart the tx processes */
2182 csr6 = LMC_CSR_READ (sc, csr_command);
2183 LMC_CSR_WRITE (sc, csr_command, csr6 | 0x0002);
2184 LMC_CSR_WRITE (sc, csr_command, csr6 | 0x2002);
2185
2186 /* immediate transmit */
2187 LMC_CSR_WRITE (sc, csr_txpoll, 0);
2188
2189 sc->stats.tx_errors++;
2190 sc->stats.tx_ProcTimeout++; /* -baz */
2191
2192 dev->trans_start = jiffies;
2193
2194bug_out:
2195
2196 spin_unlock_irqrestore(&sc->lmc_lock, flags);
2197
2198    lmc_trace(dev, "lmc_driver_timeout out");
2199
2200
2201}
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
new file mode 100644
index 000000000000..f55ce76b00ed
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -0,0 +1,1246 @@
1/* $Id: lmc_media.c,v 1.13 2000/04/11 05:25:26 asj Exp $ */
2
3#include <linux/config.h>
4#include <linux/kernel.h>
5#include <linux/string.h>
6#include <linux/timer.h>
7#include <linux/ptrace.h>
8#include <linux/errno.h>
9#include <linux/ioport.h>
10#include <linux/slab.h>
11#include <linux/interrupt.h>
12#include <linux/pci.h>
13#include <linux/in.h>
14#include <linux/if_arp.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/skbuff.h>
18#include <linux/inet.h>
19#include <linux/bitops.h>
20
21#include <net/syncppp.h>
22
23#include <asm/processor.h> /* Processor type for cache alignment. */
24#include <asm/io.h>
25#include <asm/dma.h>
26
27#include <asm/uaccess.h>
28
29#include "lmc.h"
30#include "lmc_var.h"
31#include "lmc_ioctl.h"
32#include "lmc_debug.h"
33
34#define CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE 1
35
36 /*
37 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
38 * All rights reserved. www.lanmedia.com
39 *
40 * This code is written by:
41 * Andrew Stanley-Jones (asj@cban.com)
42 * Rob Braun (bbraun@vix.com),
43 * Michael Graff (explorer@vix.com) and
44 * Matt Thomas (matt@3am-software.com).
45 *
46 * This software may be used and distributed according to the terms
47 * of the GNU General Public License version 2, incorporated herein by reference.
48 */
49
50/*
51 * For lack of a better place, put the SSI cable stuff here.
52 */
53char *lmc_t1_cables[] = {
54 "V.10/RS423", "EIA530A", "reserved", "X.21", "V.35",
55 "EIA449/EIA530/V.36", "V.28/EIA232", "none", NULL
56};
57
58/*
59 * protocol independent method.
60 */
61static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
62
63/*
64 * media independent methods to check on media status, link, light LEDs,
65 * etc.
66 */
67static void lmc_ds3_init (lmc_softc_t * const);
68static void lmc_ds3_default (lmc_softc_t * const);
69static void lmc_ds3_set_status (lmc_softc_t * const, lmc_ctl_t *);
70static void lmc_ds3_set_100ft (lmc_softc_t * const, int);
71static int lmc_ds3_get_link_status (lmc_softc_t * const);
72static void lmc_ds3_set_crc_length (lmc_softc_t * const, int);
73static void lmc_ds3_set_scram (lmc_softc_t * const, int);
74static void lmc_ds3_watchdog (lmc_softc_t * const);
75
76static void lmc_hssi_init (lmc_softc_t * const);
77static void lmc_hssi_default (lmc_softc_t * const);
78static void lmc_hssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
79static void lmc_hssi_set_clock (lmc_softc_t * const, int);
80static int lmc_hssi_get_link_status (lmc_softc_t * const);
81static void lmc_hssi_set_link_status (lmc_softc_t * const, int);
82static void lmc_hssi_set_crc_length (lmc_softc_t * const, int);
83static void lmc_hssi_watchdog (lmc_softc_t * const);
84
85static void lmc_ssi_init (lmc_softc_t * const);
86static void lmc_ssi_default (lmc_softc_t * const);
87static void lmc_ssi_set_status (lmc_softc_t * const, lmc_ctl_t *);
88static void lmc_ssi_set_clock (lmc_softc_t * const, int);
89static void lmc_ssi_set_speed (lmc_softc_t * const, lmc_ctl_t *);
90static int lmc_ssi_get_link_status (lmc_softc_t * const);
91static void lmc_ssi_set_link_status (lmc_softc_t * const, int);
92static void lmc_ssi_set_crc_length (lmc_softc_t * const, int);
93static void lmc_ssi_watchdog (lmc_softc_t * const);
94
95static void lmc_t1_init (lmc_softc_t * const);
96static void lmc_t1_default (lmc_softc_t * const);
97static void lmc_t1_set_status (lmc_softc_t * const, lmc_ctl_t *);
98static int lmc_t1_get_link_status (lmc_softc_t * const);
99static void lmc_t1_set_circuit_type (lmc_softc_t * const, int);
100static void lmc_t1_set_crc_length (lmc_softc_t * const, int);
101static void lmc_t1_set_clock (lmc_softc_t * const, int);
102static void lmc_t1_watchdog (lmc_softc_t * const);
103
104static void lmc_dummy_set_1 (lmc_softc_t * const, int);
105static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *);
106
107static inline void write_av9110_bit (lmc_softc_t *, int);
108static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t,
109 u_int32_t, u_int32_t);
110
111lmc_media_t lmc_ds3_media = {
112 lmc_ds3_init, /* special media init stuff */
113 lmc_ds3_default, /* reset to default state */
114 lmc_ds3_set_status, /* reset status to state provided */
115 lmc_dummy_set_1, /* set clock source */
116 lmc_dummy_set2_1, /* set line speed */
117 lmc_ds3_set_100ft, /* set cable length */
118 lmc_ds3_set_scram, /* set scrambler */
119 lmc_ds3_get_link_status, /* get link status */
120 lmc_dummy_set_1, /* set link status */
121 lmc_ds3_set_crc_length, /* set CRC length */
122 lmc_dummy_set_1, /* set T1 or E1 circuit type */
123 lmc_ds3_watchdog
124};
125
126lmc_media_t lmc_hssi_media = {
127 lmc_hssi_init, /* special media init stuff */
128 lmc_hssi_default, /* reset to default state */
129 lmc_hssi_set_status, /* reset status to state provided */
130 lmc_hssi_set_clock, /* set clock source */
131 lmc_dummy_set2_1, /* set line speed */
132 lmc_dummy_set_1, /* set cable length */
133 lmc_dummy_set_1, /* set scrambler */
134 lmc_hssi_get_link_status, /* get link status */
135 lmc_hssi_set_link_status, /* set link status */
136 lmc_hssi_set_crc_length, /* set CRC length */
137 lmc_dummy_set_1, /* set T1 or E1 circuit type */
138 lmc_hssi_watchdog
139};
140
141lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
142 lmc_ssi_default, /* reset to default state */
143 lmc_ssi_set_status, /* reset status to state provided */
144 lmc_ssi_set_clock, /* set clock source */
145 lmc_ssi_set_speed, /* set line speed */
146 lmc_dummy_set_1, /* set cable length */
147 lmc_dummy_set_1, /* set scrambler */
148 lmc_ssi_get_link_status, /* get link status */
149 lmc_ssi_set_link_status, /* set link status */
150 lmc_ssi_set_crc_length, /* set CRC length */
151 lmc_dummy_set_1, /* set T1 or E1 circuit type */
152 lmc_ssi_watchdog
153};
154
155lmc_media_t lmc_t1_media = {
156 lmc_t1_init, /* special media init stuff */
157 lmc_t1_default, /* reset to default state */
158 lmc_t1_set_status, /* reset status to state provided */
159 lmc_t1_set_clock, /* set clock source */
160 lmc_dummy_set2_1, /* set line speed */
161 lmc_dummy_set_1, /* set cable length */
162 lmc_dummy_set_1, /* set scrambler */
163 lmc_t1_get_link_status, /* get link status */
164 lmc_dummy_set_1, /* set link status */
165 lmc_t1_set_crc_length, /* set CRC length */
166 lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
167 lmc_t1_watchdog
168};
169
170static void
171lmc_dummy_set_1 (lmc_softc_t * const sc, int a)
172{
173}
174
175static void
176lmc_dummy_set2_1 (lmc_softc_t * const sc, lmc_ctl_t * a)
177{
178}
179
180/*
181 * HSSI methods
182 */
183
184static void
185lmc_hssi_init (lmc_softc_t * const sc)
186{
187 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5200;
188
189 lmc_gpio_mkoutput (sc, LMC_GEP_HSSI_CLOCK);
190}
191
192static void
193lmc_hssi_default (lmc_softc_t * const sc)
194{
195 sc->lmc_miireg16 = LMC_MII16_LED_ALL;
196
197 sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
198 sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
199 sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
200}
201
202/*
203 * Given a user provided state, set ourselves up to match it. This will
204 * always reset the card if needed.
205 */
206static void
207lmc_hssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
208{
209 if (ctl == NULL)
210 {
211 sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
212 lmc_set_protocol (sc, NULL);
213
214 return;
215 }
216
217 /*
218 * check for change in clock source
219 */
220 if (ctl->clock_source && !sc->ictl.clock_source)
221 {
222 sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
223 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
224 }
225 else if (!ctl->clock_source && sc->ictl.clock_source)
226 {
227 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
228 sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
229 }
230
231 lmc_set_protocol (sc, ctl);
232}
233
234/*
235 * 1 == internal, 0 == external
236 */
237static void
238lmc_hssi_set_clock (lmc_softc_t * const sc, int ie)
239{
240 int old;
241 old = sc->ictl.clock_source;
242 if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
243 {
244 sc->lmc_gpio |= LMC_GEP_HSSI_CLOCK;
245 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
246 sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
247 if(old != ie)
248 printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
249 }
250 else
251 {
252 sc->lmc_gpio &= ~(LMC_GEP_HSSI_CLOCK);
253 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
254 sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
255 if(old != ie)
256 printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
257 }
258}
259
260/*
261 * return hardware link status.
262 * 0 == link is down, 1 == link is up.
263 */
264static int
265lmc_hssi_get_link_status (lmc_softc_t * const sc)
266{
267 /*
268 * We're using the same code as SSI since
269 * they're practically the same
270 */
271 return lmc_ssi_get_link_status(sc);
272}
273
274static void
275lmc_hssi_set_link_status (lmc_softc_t * const sc, int state)
276{
277 if (state == LMC_LINK_UP)
278 sc->lmc_miireg16 |= LMC_MII16_HSSI_TA;
279 else
280 sc->lmc_miireg16 &= ~LMC_MII16_HSSI_TA;
281
282 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
283}
284
285/*
286 * 0 == 16bit, 1 == 32bit
287 */
288static void
289lmc_hssi_set_crc_length (lmc_softc_t * const sc, int state)
290{
291 if (state == LMC_CTL_CRC_LENGTH_32)
292 {
293 /* 32 bit */
294 sc->lmc_miireg16 |= LMC_MII16_HSSI_CRC;
295 sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
296 }
297 else
298 {
299 /* 16 bit */
300 sc->lmc_miireg16 &= ~LMC_MII16_HSSI_CRC;
301 sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
302 }
303
304 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
305}
306
307static void
308lmc_hssi_watchdog (lmc_softc_t * const sc)
309{
310 /* HSSI is blank */
311}
312
313/*
314 * DS3 methods
315 */
316
317/*
318 * Set cable length
319 */
320static void
321lmc_ds3_set_100ft (lmc_softc_t * const sc, int ie)
322{
323 if (ie == LMC_CTL_CABLE_LENGTH_GT_100FT)
324 {
325 sc->lmc_miireg16 &= ~LMC_MII16_DS3_ZERO;
326 sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_GT_100FT;
327 }
328 else if (ie == LMC_CTL_CABLE_LENGTH_LT_100FT)
329 {
330 sc->lmc_miireg16 |= LMC_MII16_DS3_ZERO;
331 sc->ictl.cable_length = LMC_CTL_CABLE_LENGTH_LT_100FT;
332 }
333 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
334}
335
336static void
337lmc_ds3_default (lmc_softc_t * const sc)
338{
339 sc->lmc_miireg16 = LMC_MII16_LED_ALL;
340
341 sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
342 sc->lmc_media->set_cable_length (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
343 sc->lmc_media->set_scrambler (sc, LMC_CTL_OFF);
344 sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
345}
346
347/*
348 * Given a user provided state, set ourselves up to match it. This will
349 * always reset the card if needed.
350 */
351static void
352lmc_ds3_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
353{
354 if (ctl == NULL)
355 {
356 sc->lmc_media->set_cable_length (sc, sc->ictl.cable_length);
357 sc->lmc_media->set_scrambler (sc, sc->ictl.scrambler_onoff);
358 lmc_set_protocol (sc, NULL);
359
360 return;
361 }
362
363 /*
364 * check for change in cable length setting
365 */
366 if (ctl->cable_length && !sc->ictl.cable_length)
367 lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_GT_100FT);
368 else if (!ctl->cable_length && sc->ictl.cable_length)
369 lmc_ds3_set_100ft (sc, LMC_CTL_CABLE_LENGTH_LT_100FT);
370
371 /*
372 * Check for change in scrambler setting (requires reset)
373 */
374 if (ctl->scrambler_onoff && !sc->ictl.scrambler_onoff)
375 lmc_ds3_set_scram (sc, LMC_CTL_ON);
376 else if (!ctl->scrambler_onoff && sc->ictl.scrambler_onoff)
377 lmc_ds3_set_scram (sc, LMC_CTL_OFF);
378
379 lmc_set_protocol (sc, ctl);
380}
381
382static void
383lmc_ds3_init (lmc_softc_t * const sc)
384{
385 int i;
386
387 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC5245;
388
389  /* write zeros to framer registers 0-20 */
390 for (i = 0; i < 21; i++)
391 {
392 lmc_mii_writereg (sc, 0, 17, i);
393 lmc_mii_writereg (sc, 0, 18, 0);
394 }
395
396 /* set some essential bits */
397 lmc_mii_writereg (sc, 0, 17, 1);
398 lmc_mii_writereg (sc, 0, 18, 0x25); /* ser, xtx */
399
400 lmc_mii_writereg (sc, 0, 17, 5);
401 lmc_mii_writereg (sc, 0, 18, 0x80); /* emode */
402
403 lmc_mii_writereg (sc, 0, 17, 14);
404 lmc_mii_writereg (sc, 0, 18, 0x30); /* rcgen, tcgen */
405
406 /* clear counters and latched bits */
407 for (i = 0; i < 21; i++)
408 {
409 lmc_mii_writereg (sc, 0, 17, i);
410 lmc_mii_readreg (sc, 0, 18);
411 }
412}
413
414/*
415 * 1 == DS3 payload scrambled, 0 == not scrambled
416 */
417static void
418lmc_ds3_set_scram (lmc_softc_t * const sc, int ie)
419{
420 if (ie == LMC_CTL_ON)
421 {
422 sc->lmc_miireg16 |= LMC_MII16_DS3_SCRAM;
423 sc->ictl.scrambler_onoff = LMC_CTL_ON;
424 }
425 else
426 {
427 sc->lmc_miireg16 &= ~LMC_MII16_DS3_SCRAM;
428 sc->ictl.scrambler_onoff = LMC_CTL_OFF;
429 }
430 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
431}
432
433/*
434 * return hardware link status.
435 * 0 == link is down, 1 == link is up.
436 */
437static int
438lmc_ds3_get_link_status (lmc_softc_t * const sc)
439{
440 u_int16_t link_status, link_status_11;
441 int ret = 1;
442
443 lmc_mii_writereg (sc, 0, 17, 7);
444 link_status = lmc_mii_readreg (sc, 0, 18);
445
446 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
447 * led0 yellow = far-end adapter is in Red alarm condition
448 * led1 blue = received an Alarm Indication signal
449 * (upstream failure)
450 * led2 Green = power to adapter, Gate Array loaded & driver
451 * attached
452 * led3 red = Loss of Signal (LOS) or out of frame (OOF)
453 * conditions detected on T3 receive signal
454 */
455
456 lmc_led_on(sc, LMC_DS3_LED2);
457
458 if ((link_status & LMC_FRAMER_REG0_DLOS) ||
459 (link_status & LMC_FRAMER_REG0_OOFS)){
460 ret = 0;
461 if(sc->last_led_err[3] != 1){
462 u16 r1;
463 lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */
464 r1 = lmc_mii_readreg (sc, 0, 18);
465 r1 &= 0xfe;
466 lmc_mii_writereg(sc, 0, 18, r1);
467 printk(KERN_WARNING "%s: Red Alarm - Loss of Signal or Loss of Framing\n", sc->name);
468 }
469 lmc_led_on(sc, LMC_DS3_LED3); /* turn on red LED */
470 sc->last_led_err[3] = 1;
471 }
472 else {
473	  lmc_led_off(sc, LMC_DS3_LED3);	/* turn off red LED */
474 if(sc->last_led_err[3] == 1){
475 u16 r1;
476 lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */
477 r1 = lmc_mii_readreg (sc, 0, 18);
478 r1 |= 0x01;
479 lmc_mii_writereg(sc, 0, 18, r1);
480 }
481 sc->last_led_err[3] = 0;
482 }
483
484 lmc_mii_writereg(sc, 0, 17, 0x10);
485 link_status_11 = lmc_mii_readreg(sc, 0, 18);
486 if((link_status & LMC_FRAMER_REG0_AIS) ||
487 (link_status_11 & LMC_FRAMER_REG10_XBIT)) {
488 ret = 0;
489 if(sc->last_led_err[0] != 1){
490 printk(KERN_WARNING "%s: AIS Alarm or XBit Error\n", sc->name);
491 printk(KERN_WARNING "%s: Remote end has loss of signal or framing\n", sc->name);
492 }
493 lmc_led_on(sc, LMC_DS3_LED0);
494 sc->last_led_err[0] = 1;
495 }
496 else {
497 lmc_led_off(sc, LMC_DS3_LED0);
498 sc->last_led_err[0] = 0;
499 }
500
501 lmc_mii_writereg (sc, 0, 17, 9);
502 link_status = lmc_mii_readreg (sc, 0, 18);
503
504 if(link_status & LMC_FRAMER_REG9_RBLUE){
505 ret = 0;
506 if(sc->last_led_err[1] != 1){
507 printk(KERN_WARNING "%s: Blue Alarm - Receiving all 1's\n", sc->name);
508 }
509 lmc_led_on(sc, LMC_DS3_LED1);
510 sc->last_led_err[1] = 1;
511 }
512 else {
513 lmc_led_off(sc, LMC_DS3_LED1);
514 sc->last_led_err[1] = 0;
515 }
516
517 return ret;
518}
519
520/*
521 * 0 == 16bit, 1 == 32bit
522 */
523static void
524lmc_ds3_set_crc_length (lmc_softc_t * const sc, int state)
525{
526 if (state == LMC_CTL_CRC_LENGTH_32)
527 {
528 /* 32 bit */
529 sc->lmc_miireg16 |= LMC_MII16_DS3_CRC;
530 sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
531 }
532 else
533 {
534 /* 16 bit */
535 sc->lmc_miireg16 &= ~LMC_MII16_DS3_CRC;
536 sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
537 }
538
539 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
540}
541
542static void
543lmc_ds3_watchdog (lmc_softc_t * const sc)
544{
545
546}
547
548
549/*
550 * SSI methods
551 */
552
553static void
554lmc_ssi_init (lmc_softc_t * const sc)
555{
556 u_int16_t mii17;
557 int cable;
558
559 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000;
560
561 mii17 = lmc_mii_readreg (sc, 0, 17);
562
563 cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT;
564 sc->ictl.cable_type = cable;
565
566 lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
567}
568
569static void
570lmc_ssi_default (lmc_softc_t * const sc)
571{
572 sc->lmc_miireg16 = LMC_MII16_LED_ALL;
573
574 /*
575 * make TXCLOCK always be an output
576 */
577 lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK);
578
579 sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
580 sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
581 sc->lmc_media->set_speed (sc, NULL);
582 sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
583}
584
585/*
586 * Given a user provided state, set ourselves up to match it. This will
587 * always reset the card if needed.
588 */
589static void
590lmc_ssi_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
591{
592 if (ctl == NULL)
593 {
594 sc->lmc_media->set_clock_source (sc, sc->ictl.clock_source);
595 sc->lmc_media->set_speed (sc, &sc->ictl);
596 lmc_set_protocol (sc, NULL);
597
598 return;
599 }
600
601 /*
602 * check for change in clock source
603 */
604 if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_INT
605 && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_EXT)
606 {
607 sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_INT);
608 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_INT;
609 }
610 else if (ctl->clock_source == LMC_CTL_CLOCK_SOURCE_EXT
611 && sc->ictl.clock_source == LMC_CTL_CLOCK_SOURCE_INT)
612 {
613 sc->lmc_media->set_clock_source (sc, LMC_CTL_CLOCK_SOURCE_EXT);
614 sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT;
615 }
616
617 if (ctl->clock_rate != sc->ictl.clock_rate)
618 sc->lmc_media->set_speed (sc, ctl);
619
620 lmc_set_protocol (sc, ctl);
621}
622
623/*
624 * 1 == internal, 0 == external
625 */
626static void
627lmc_ssi_set_clock (lmc_softc_t * const sc, int ie)
628{
629 int old;
630  old = sc->ictl.clock_source;
631 if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
632 {
633 sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
634 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
635 sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
636 if(ie != old)
637 printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
638 }
639 else
640 {
641 sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
642 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
643 sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
644 if(ie != old)
645 printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
646 }
647}
648
649static void
650lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl)
651{
652 lmc_ctl_t *ictl = &sc->ictl;
653 lmc_av9110_t *av;
654
655  /* The original settings for a clock rate of
656   * 100 kHz (8,25,0,0,2) were incorrect;
657   * they should have been 80,125,1,3,3.
658   * There are 17 parameter combinations that produce this frequency.
659   * For 1.5 MHz use 120,100,1,1,2 (226 parameter combinations).
660   */
661 if (ctl == NULL)
662 {
663 av = &ictl->cardspec.ssi;
664 ictl->clock_rate = 1500000;
665 av->f = ictl->clock_rate;
666 av->n = 120;
667 av->m = 100;
668 av->v = 1;
669 av->x = 1;
670 av->r = 2;
671
672 write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
673 return;
674 }
675
676 av = &ctl->cardspec.ssi;
677
678 if (av->f == 0)
679 return;
680
681  ictl->clock_rate = av->f;	/* really, this is the rate we are running at */
682 ictl->cardspec.ssi = *av;
683
684 write_av9110 (sc, av->n, av->m, av->v, av->x, av->r);
685}
686
687/*
688 * return hardware link status.
689 * 0 == link is down, 1 == link is up.
690 */
691static int
692lmc_ssi_get_link_status (lmc_softc_t * const sc)
693{
694 u_int16_t link_status;
695 u_int32_t ticks;
696 int ret = 1;
697 int hw_hdsk = 1;
698
699 /*
700 * missing CTS? Hmm. If we require CTS on, we may never get the
701 * link to come up, so omit it in this test.
702 *
703 * Also, it seems that with a loopback cable, DCD isn't asserted,
704 * so just check for things like this:
705 * DSR _must_ be asserted.
706 * One of DCD or CTS must be asserted.
707 */
708
709 /* LMC 1000 (SSI) LED definitions
710 * led0 Green = power to adapter, Gate Array loaded &
711 * driver attached
712 * led1 Green = DSR and DTR and RTS and CTS are set
713 * led2 Green = Cable detected
714 * led3 red = No timing is available from the
715 * cable or the on-board frequency
716 * generator.
717 */
718
719 link_status = lmc_mii_readreg (sc, 0, 16);
720
721 /* Is the transmit clock still available */
722 ticks = LMC_CSR_READ (sc, csr_gp_timer);
723 ticks = 0x0000ffff - (ticks & 0x0000ffff);
724
725 lmc_led_on (sc, LMC_MII16_LED0);
726
727 /* ====== transmit clock determination ===== */
728 if (sc->lmc_timing == LMC_CTL_CLOCK_SOURCE_INT) {
729 lmc_led_off(sc, LMC_MII16_LED3);
730 }
731 else if (ticks == 0 ) { /* no clock found ? */
732 ret = 0;
733 if(sc->last_led_err[3] != 1){
734 sc->stats.tx_lossOfClockCnt++;
735 printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name);
736 }
737 sc->last_led_err[3] = 1;
738 lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */
739 }
740 else {
741 if(sc->last_led_err[3] == 1)
742 printk(KERN_WARNING "%s: Clock Returned\n", sc->name);
743 sc->last_led_err[3] = 0;
744 lmc_led_off (sc, LMC_MII16_LED3); /* turn OFF red LED */
745 }
746
747 if ((link_status & LMC_MII16_SSI_DSR) == 0) { /* Also HSSI CA */
748 ret = 0;
749 hw_hdsk = 0;
750 }
751
752#ifdef CONFIG_LMC_IGNORE_HARDWARE_HANDSHAKE
753 if ((link_status & (LMC_MII16_SSI_CTS | LMC_MII16_SSI_DCD)) == 0){
754 ret = 0;
755 hw_hdsk = 0;
756 }
757#endif
758
759 if(hw_hdsk == 0){
760 if(sc->last_led_err[1] != 1)
761 printk(KERN_WARNING "%s: DSR not asserted\n", sc->name);
762 sc->last_led_err[1] = 1;
763 lmc_led_off(sc, LMC_MII16_LED1);
764 }
765 else {
766 if(sc->last_led_err[1] != 0)
767 printk(KERN_WARNING "%s: DSR now asserted\n", sc->name);
768 sc->last_led_err[1] = 0;
769 lmc_led_on(sc, LMC_MII16_LED1);
770 }
771
772 if(ret == 1) {
773      lmc_led_on(sc, LMC_MII16_LED2); /* Overall good status? */
774 }
775
776 return ret;
777}
778
779static void
780lmc_ssi_set_link_status (lmc_softc_t * const sc, int state)
781{
782 if (state == LMC_LINK_UP)
783 {
784 sc->lmc_miireg16 |= (LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
785 printk (LMC_PRINTF_FMT ": asserting DTR and RTS\n", LMC_PRINTF_ARGS);
786 }
787 else
788 {
789 sc->lmc_miireg16 &= ~(LMC_MII16_SSI_DTR | LMC_MII16_SSI_RTS);
790 printk (LMC_PRINTF_FMT ": deasserting DTR and RTS\n", LMC_PRINTF_ARGS);
791 }
792
793 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
794
795}
796
797/*
798 * 0 == 16bit, 1 == 32bit
799 */
800static void
801lmc_ssi_set_crc_length (lmc_softc_t * const sc, int state)
802{
803 if (state == LMC_CTL_CRC_LENGTH_32)
804 {
805 /* 32 bit */
806 sc->lmc_miireg16 |= LMC_MII16_SSI_CRC;
807 sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
808 sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
809
810 }
811 else
812 {
813 /* 16 bit */
814 sc->lmc_miireg16 &= ~LMC_MII16_SSI_CRC;
815 sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
816 sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
817 }
818
819 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
820}
821
822/*
823 * These are bits to program the ssi frequency generator
824 */
825static inline void
826write_av9110_bit (lmc_softc_t * sc, int c)
827{
828 /*
829 * set the data bit as we need it.
830 */
831 sc->lmc_gpio &= ~(LMC_GEP_CLK);
832 if (c & 0x01)
833 sc->lmc_gpio |= LMC_GEP_DATA;
834 else
835 sc->lmc_gpio &= ~(LMC_GEP_DATA);
836 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
837
838 /*
839 * set the clock to high
840 */
841 sc->lmc_gpio |= LMC_GEP_CLK;
842 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
843
844 /*
845 * set the clock to low again.
846 */
847 sc->lmc_gpio &= ~(LMC_GEP_CLK);
848 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
849}
850
851static void
852write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v,
853 u_int32_t x, u_int32_t r)
854{
855 int i;
856
857#if 0
858 printk (LMC_PRINTF_FMT ": speed %u, %d %d %d %d %d\n",
859 LMC_PRINTF_ARGS, sc->ictl.clock_rate, n, m, v, x, r);
860#endif
861
862 sc->lmc_gpio |= LMC_GEP_SSI_GENERATOR;
863 sc->lmc_gpio &= ~(LMC_GEP_DATA | LMC_GEP_CLK);
864 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
865
866 /*
867 * Set the TXCLOCK, GENERATOR, SERIAL, and SERIALCLK
868 * as outputs.
869 */
870 lmc_gpio_mkoutput (sc, (LMC_GEP_DATA | LMC_GEP_CLK
871 | LMC_GEP_SSI_GENERATOR));
872
873 sc->lmc_gpio &= ~(LMC_GEP_SSI_GENERATOR);
874 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
875
876 /*
877 * a shifting we will go...
878 */
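  /*
   * Bit order, LSB first within each field: 7 bits of N, 7 bits of M,
   * 1 bit of V, 2 bits of X, 2 bits of R, then the fixed 5-bit pattern
   * 0x17 -- 24 bits shifted in all.
   */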
879 for (i = 0; i < 7; i++)
880 write_av9110_bit (sc, n >> i);
881 for (i = 0; i < 7; i++)
882 write_av9110_bit (sc, m >> i);
883 for (i = 0; i < 1; i++)
884 write_av9110_bit (sc, v >> i);
885 for (i = 0; i < 2; i++)
886 write_av9110_bit (sc, x >> i);
887 for (i = 0; i < 2; i++)
888 write_av9110_bit (sc, r >> i);
889 for (i = 0; i < 5; i++)
890 write_av9110_bit (sc, 0x17 >> i);
891
892 /*
893 * stop driving serial-related signals
894 */
895 lmc_gpio_mkinput (sc,
896 (LMC_GEP_DATA | LMC_GEP_CLK
897 | LMC_GEP_SSI_GENERATOR));
898}
899
900static void
901lmc_ssi_watchdog (lmc_softc_t * const sc)
902{
903 u_int16_t mii17;
904 struct ssicsr2
905 {
906 unsigned short dtr:1, dsr:1, rts:1, cable:3, crc:1, led0:1, led1:1,
907 led2:1, led3:1, fifo:1, ll:1, rl:1, tm:1, loop:1;
908 };
909 struct ssicsr2 *ssicsr;
910 mii17 = lmc_mii_readreg (sc, 0, 17);
911 ssicsr = (struct ssicsr2 *) &mii17;
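  /* A cable code of 7 appears to mean no cable is attached, so douse the
     cable-detect LED (LMC_MII16_LED2); anything else lights it. */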
912 if (ssicsr->cable == 7)
913 {
914 lmc_led_off (sc, LMC_MII16_LED2);
915 }
916 else
917 {
918 lmc_led_on (sc, LMC_MII16_LED2);
919 }
920
921}
922
923/*
924 * T1 methods
925 */
926
927/*
928 * The framer regs are multiplexed through MII regs 17 & 18:
929 * write the register address to MII reg 17 and the data to MII reg 18. */
930static void
931lmc_t1_write (lmc_softc_t * const sc, int a, int d)
932{
933 lmc_mii_writereg (sc, 0, 17, a);
934 lmc_mii_writereg (sc, 0, 18, d);
935}
936
937/* Save a warning
938static int
939lmc_t1_read (lmc_softc_t * const sc, int a)
940{
941 lmc_mii_writereg (sc, 0, 17, a);
942 return lmc_mii_readreg (sc, 0, 18);
943}
944*/
945
946
947static void
948lmc_t1_init (lmc_softc_t * const sc)
949{
950 u_int16_t mii16;
951 int i;
952
953 sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200;
954 mii16 = lmc_mii_readreg (sc, 0, 16);
955
956 /* reset 8370 */
957 mii16 &= ~LMC_MII16_T1_RST;
958 lmc_mii_writereg (sc, 0, 16, mii16 | LMC_MII16_T1_RST);
959 lmc_mii_writereg (sc, 0, 16, mii16);
960
961  /* set T1 or E1 line. Uses sc->lmc_miireg16 in the function below, so keep it updated */
962 sc->lmc_miireg16 = mii16;
963 lmc_t1_set_circuit_type(sc, LMC_CTL_CIRCUIT_TYPE_T1);
964 mii16 = sc->lmc_miireg16;
965
966 lmc_t1_write (sc, 0x01, 0x1B); /* CR0 - primary control */
967 lmc_t1_write (sc, 0x02, 0x42); /* JAT_CR - jitter atten config */
968 lmc_t1_write (sc, 0x14, 0x00); /* LOOP - loopback config */
969 lmc_t1_write (sc, 0x15, 0x00); /* DL3_TS - external data link timeslot */
970 lmc_t1_write (sc, 0x18, 0xFF); /* PIO - programmable I/O */
971 lmc_t1_write (sc, 0x19, 0x30); /* POE - programmable OE */
972 lmc_t1_write (sc, 0x1A, 0x0F); /* CMUX - clock input mux */
973 lmc_t1_write (sc, 0x20, 0x41); /* LIU_CR - RX LIU config */
974 lmc_t1_write (sc, 0x22, 0x76); /* RLIU_CR - RX LIU config */
975 lmc_t1_write (sc, 0x40, 0x03); /* RCR0 - RX config */
976 lmc_t1_write (sc, 0x45, 0x00); /* RALM - RX alarm config */
977 lmc_t1_write (sc, 0x46, 0x05); /* LATCH - RX alarm/err/cntr latch */
978 lmc_t1_write (sc, 0x68, 0x40); /* TLIU_CR - TX LIU config */
979 lmc_t1_write (sc, 0x70, 0x0D); /* TCR0 - TX framer config */
980 lmc_t1_write (sc, 0x71, 0x05); /* TCR1 - TX config */
981 lmc_t1_write (sc, 0x72, 0x0B); /* TFRM - TX frame format */
982 lmc_t1_write (sc, 0x73, 0x00); /* TERROR - TX error insert */
983 lmc_t1_write (sc, 0x74, 0x00); /* TMAN - TX manual Sa/FEBE config */
984 lmc_t1_write (sc, 0x75, 0x00); /* TALM - TX alarm signal config */
985 lmc_t1_write (sc, 0x76, 0x00); /* TPATT - TX test pattern config */
986 lmc_t1_write (sc, 0x77, 0x00); /* TLB - TX inband loopback config */
987 lmc_t1_write (sc, 0x90, 0x05); /* CLAD_CR - clock rate adapter config */
988 lmc_t1_write (sc, 0x91, 0x05); /* CSEL - clad freq sel */
989 lmc_t1_write (sc, 0xA6, 0x00); /* DL1_CTL - DL1 control */
990 lmc_t1_write (sc, 0xB1, 0x00); /* DL2_CTL - DL2 control */
991 lmc_t1_write (sc, 0xD0, 0x47); /* SBI_CR - sys bus iface config */
992 lmc_t1_write (sc, 0xD1, 0x70); /* RSB_CR - RX sys bus config */
993 lmc_t1_write (sc, 0xD4, 0x30); /* TSB_CR - TX sys bus config */
994 for (i = 0; i < 32; i++)
995 {
996 lmc_t1_write (sc, 0x0E0 + i, 0x00); /* SBCn - sys bus per-channel ctl */
997 lmc_t1_write (sc, 0x100 + i, 0x00); /* TPCn - TX per-channel ctl */
998 lmc_t1_write (sc, 0x180 + i, 0x00); /* RPCn - RX per-channel ctl */
999 }
1000 for (i = 1; i < 25; i++)
1001 {
1002 lmc_t1_write (sc, 0x0E0 + i, 0x0D); /* SBCn - sys bus per-channel ctl */
1003 }
1004
1005 mii16 |= LMC_MII16_T1_XOE;
1006 lmc_mii_writereg (sc, 0, 16, mii16);
1007 sc->lmc_miireg16 = mii16;
1008}
1009
1010static void
1011lmc_t1_default (lmc_softc_t * const sc)
1012{
1013 sc->lmc_miireg16 = LMC_MII16_LED_ALL;
1014 sc->lmc_media->set_link_status (sc, LMC_LINK_DOWN);
1015 sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_T1);
1016 sc->lmc_media->set_crc_length (sc, LMC_CTL_CRC_LENGTH_16);
1017  /* Right now we can only clock from our internal source */
1018 sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
1019}
1020/* Given a user provided state, set ourselves up to match it. This will
1021 * always reset the card if needed. */
1022static void
1023lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl)
1024{
1025 if (ctl == NULL)
1026 {
1027 sc->lmc_media->set_circuit_type (sc, sc->ictl.circuit_type);
1028 lmc_set_protocol (sc, NULL);
1029
1030 return;
1031 }
1032 /*
1033 * check for change in circuit type */
1034  if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_T1
1035      && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_E1)
1036    sc->lmc_media->set_circuit_type (sc,
1037				     LMC_CTL_CIRCUIT_TYPE_T1);
1038  else if (ctl->circuit_type == LMC_CTL_CIRCUIT_TYPE_E1
1039	   && sc->ictl.circuit_type == LMC_CTL_CIRCUIT_TYPE_T1)
1040    sc->lmc_media->set_circuit_type (sc, LMC_CTL_CIRCUIT_TYPE_E1);
1041 lmc_set_protocol (sc, ctl);
1042}
1043/*
1044 * return hardware link status.
1045 * 0 == link is down, 1 == link is up.
1046 */
1047static int lmc_t1_get_link_status (lmc_softc_t * const sc)
1048{
1049 u_int16_t link_status;
1050 int ret = 1;
1051
1052 /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions
1053 * led0 yellow = far-end adapter is in Red alarm condition
1054 * led1 blue = received an Alarm Indication signal
1055 * (upstream failure)
1056 * led2 Green = power to adapter, Gate Array loaded & driver
1057 * attached
1058 * led3 red = Loss of Signal (LOS) or out of frame (OOF)
1059 * conditions detected on T3 receive signal
1060 */
1061 lmc_trace(sc->lmc_device, "lmc_t1_get_link_status in");
1062 lmc_led_on(sc, LMC_DS3_LED2);
1063
1064 lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
1065 link_status = lmc_mii_readreg (sc, 0, 18);
1066
1067
1068 if (link_status & T1F_RAIS) { /* turn on blue LED */
1069 ret = 0;
1070 if(sc->last_led_err[1] != 1){
1071 printk(KERN_WARNING "%s: Receive AIS/Blue Alarm. Far end in RED alarm\n", sc->name);
1072 }
1073 lmc_led_on(sc, LMC_DS3_LED1);
1074 sc->last_led_err[1] = 1;
1075 }
1076 else {
1077 if(sc->last_led_err[1] != 0){
1078 printk(KERN_WARNING "%s: End AIS/Blue Alarm\n", sc->name);
1079 }
1080 lmc_led_off (sc, LMC_DS3_LED1);
1081 sc->last_led_err[1] = 0;
1082 }
1083
1084 /*
1085     * Yellow Alarm is nasty evil stuff: it looks at data patterns
1086     * inside the channel and gets confused by HDLC framing, so
1087     * ignore all yellow alarms.
1088     *
1089     * Do listen to the MultiFrame Yellow alarm which, while implemented
1090     * in different ways, isn't in the channel and hence is somewhat
1091     * more reliable.
1092 */
1093
1094 if (link_status & T1F_RMYEL) {
1095 ret = 0;
1096 if(sc->last_led_err[0] != 1){
1097 printk(KERN_WARNING "%s: Receive Yellow AIS Alarm\n", sc->name);
1098 }
1099 lmc_led_on(sc, LMC_DS3_LED0);
1100 sc->last_led_err[0] = 1;
1101 }
1102 else {
1103 if(sc->last_led_err[0] != 0){
1104 printk(KERN_WARNING "%s: End of Yellow AIS Alarm\n", sc->name);
1105 }
1106 lmc_led_off(sc, LMC_DS3_LED0);
1107 sc->last_led_err[0] = 0;
1108 }
1109
1110 /*
1111     * Loss of signal and loss of frame
1112 * Use the green bit to identify which one lit the led
1113 */
1114 if(link_status & T1F_RLOF){
1115 ret = 0;
1116 if(sc->last_led_err[3] != 1){
1117 printk(KERN_WARNING "%s: Local Red Alarm: Loss of Framing\n", sc->name);
1118 }
1119 lmc_led_on(sc, LMC_DS3_LED3);
1120 sc->last_led_err[3] = 1;
1121
1122 }
1123 else {
1124 if(sc->last_led_err[3] != 0){
1125 printk(KERN_WARNING "%s: End Red Alarm (LOF)\n", sc->name);
1126 }
1127 if( ! (link_status & T1F_RLOS))
1128 lmc_led_off(sc, LMC_DS3_LED3);
1129 sc->last_led_err[3] = 0;
1130 }
1131
1132 if(link_status & T1F_RLOS){
1133 ret = 0;
1134 if(sc->last_led_err[2] != 1){
1135 printk(KERN_WARNING "%s: Local Red Alarm: Loss of Signal\n", sc->name);
1136 }
1137 lmc_led_on(sc, LMC_DS3_LED3);
1138 sc->last_led_err[2] = 1;
1139
1140 }
1141 else {
1142 if(sc->last_led_err[2] != 0){
1143 printk(KERN_WARNING "%s: End Red Alarm (LOS)\n", sc->name);
1144 }
1145 if( ! (link_status & T1F_RLOF))
1146 lmc_led_off(sc, LMC_DS3_LED3);
1147 sc->last_led_err[2] = 0;
1148 }
1149
1150 sc->lmc_xinfo.t1_alarm1_status = link_status;
1151
1152 lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
1153 sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
1154
1155
1156 lmc_trace(sc->lmc_device, "lmc_t1_get_link_status out");
1157
1158 return ret;
1159}
1160
1161/*
1162 * 1 == T1 Circuit Type , 0 == E1 Circuit Type
1163 */
1164static void
1165lmc_t1_set_circuit_type (lmc_softc_t * const sc, int ie)
1166{
1167 if (ie == LMC_CTL_CIRCUIT_TYPE_T1) {
1168 sc->lmc_miireg16 |= LMC_MII16_T1_Z;
1169 sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_T1;
1170 printk(KERN_INFO "%s: In T1 Mode\n", sc->name);
1171 }
1172 else {
1173 sc->lmc_miireg16 &= ~LMC_MII16_T1_Z;
1174 sc->ictl.circuit_type = LMC_CTL_CIRCUIT_TYPE_E1;
1175 printk(KERN_INFO "%s: In E1 Mode\n", sc->name);
1176 }
1177
1178 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
1179
1180}
1181
1182/*
1183 * 0 == 16bit, 1 == 32bit */
1184static void
1185lmc_t1_set_crc_length (lmc_softc_t * const sc, int state)
1186{
1187 if (state == LMC_CTL_CRC_LENGTH_32)
1188 {
1189 /* 32 bit */
1190 sc->lmc_miireg16 |= LMC_MII16_T1_CRC;
1191 sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_32;
1192 sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_4;
1193
1194 }
1195 else
1196 {
1197 /* 16 bit */ sc->lmc_miireg16 &= ~LMC_MII16_T1_CRC;
1198 sc->ictl.crc_length = LMC_CTL_CRC_LENGTH_16;
1199 sc->lmc_crcSize = LMC_CTL_CRC_BYTESIZE_2;
1200
1201 }
1202
1203 lmc_mii_writereg (sc, 0, 16, sc->lmc_miireg16);
1204}
1205
1206/*
1207 * 1 == internal, 0 == external
1208 */
1209static void
1210lmc_t1_set_clock (lmc_softc_t * const sc, int ie)
1211{
1212 int old;
1213  old = sc->ictl.clock_source;
1214 if (ie == LMC_CTL_CLOCK_SOURCE_EXT)
1215 {
1216 sc->lmc_gpio &= ~(LMC_GEP_SSI_TXCLOCK);
1217 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
1218 sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_EXT;
1219 if(old != ie)
1220 printk (LMC_PRINTF_FMT ": clock external\n", LMC_PRINTF_ARGS);
1221 }
1222 else
1223 {
1224 sc->lmc_gpio |= LMC_GEP_SSI_TXCLOCK;
1225 LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio);
1226 sc->ictl.clock_source = LMC_CTL_CLOCK_SOURCE_INT;
1227 if(old != ie)
1228 printk (LMC_PRINTF_FMT ": clock internal\n", LMC_PRINTF_ARGS);
1229 }
1230}
1231
1232static void
1233lmc_t1_watchdog (lmc_softc_t * const sc)
1234{
1235}
1236
1237static void
1238lmc_set_protocol (lmc_softc_t * const sc, lmc_ctl_t * ctl)
1239{
1240 if (ctl == 0)
1241 {
1242 sc->ictl.keepalive_onoff = LMC_CTL_ON;
1243
1244 return;
1245 }
1246}
diff --git a/drivers/net/wan/lmc/lmc_media.h b/drivers/net/wan/lmc/lmc_media.h
new file mode 100644
index 000000000000..ddcc00403563
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_media.h
@@ -0,0 +1,65 @@
1#ifndef _LMC_MEDIA_H_
2#define _LMC_MEDIA_H_
3
4lmc_media_t lmc_ds3_media = {
5 lmc_ds3_init, /* special media init stuff */
6 lmc_ds3_default, /* reset to default state */
7 lmc_ds3_set_status, /* reset status to state provided */
8 lmc_dummy_set_1, /* set clock source */
9 lmc_dummy_set2_1, /* set line speed */
10 lmc_ds3_set_100ft, /* set cable length */
11 lmc_ds3_set_scram, /* set scrambler */
12 lmc_ds3_get_link_status, /* get link status */
13 lmc_dummy_set_1, /* set link status */
14 lmc_ds3_set_crc_length, /* set CRC length */
15 lmc_dummy_set_1, /* set T1 or E1 circuit type */
16 lmc_ds3_watchdog
17};
18
19lmc_media_t lmc_hssi_media = {
20 lmc_hssi_init, /* special media init stuff */
21 lmc_hssi_default, /* reset to default state */
22 lmc_hssi_set_status, /* reset status to state provided */
23 lmc_hssi_set_clock, /* set clock source */
24 lmc_dummy_set2_1, /* set line speed */
25 lmc_dummy_set_1, /* set cable length */
26 lmc_dummy_set_1, /* set scrambler */
27 lmc_hssi_get_link_status, /* get link status */
28 lmc_hssi_set_link_status, /* set link status */
29 lmc_hssi_set_crc_length, /* set CRC length */
30 lmc_dummy_set_1, /* set T1 or E1 circuit type */
31 lmc_hssi_watchdog
32};
33
34lmc_media_t lmc_ssi_media = { lmc_ssi_init, /* special media init stuff */
35 lmc_ssi_default, /* reset to default state */
36 lmc_ssi_set_status, /* reset status to state provided */
37 lmc_ssi_set_clock, /* set clock source */
38 lmc_ssi_set_speed, /* set line speed */
39 lmc_dummy_set_1, /* set cable length */
40 lmc_dummy_set_1, /* set scrambler */
41 lmc_ssi_get_link_status, /* get link status */
42 lmc_ssi_set_link_status, /* set link status */
43 lmc_ssi_set_crc_length, /* set CRC length */
44 lmc_dummy_set_1, /* set T1 or E1 circuit type */
45 lmc_ssi_watchdog
46};
47
48lmc_media_t lmc_t1_media = {
49 lmc_t1_init, /* special media init stuff */
50 lmc_t1_default, /* reset to default state */
51 lmc_t1_set_status, /* reset status to state provided */
52 lmc_t1_set_clock, /* set clock source */
53 lmc_dummy_set2_1, /* set line speed */
54 lmc_dummy_set_1, /* set cable length */
55 lmc_dummy_set_1, /* set scrambler */
56 lmc_t1_get_link_status, /* get link status */
57 lmc_dummy_set_1, /* set link status */
58 lmc_t1_set_crc_length, /* set CRC length */
59 lmc_t1_set_circuit_type, /* set T1 or E1 circuit type */
60 lmc_t1_watchdog
61};
62
63
64#endif
65
diff --git a/drivers/net/wan/lmc/lmc_prot.h b/drivers/net/wan/lmc/lmc_prot.h
new file mode 100644
index 000000000000..f3b1df9e2cdb
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_prot.h
@@ -0,0 +1,15 @@
1#ifndef _LMC_PROT_H_
2#define _LMC_PROT_H_
3
4void lmc_proto_init(lmc_softc_t *sc);
5void lmc_proto_attach(lmc_softc_t *sc);
6void lmc_proto_detach(lmc_softc_t *sc);
7void lmc_proto_reopen(lmc_softc_t *sc);
8int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
9void lmc_proto_open(lmc_softc_t *sc);
10void lmc_proto_close(lmc_softc_t *sc);
11unsigned short lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
12
13
14#endif
15
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
new file mode 100644
index 000000000000..74876c0073e8
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -0,0 +1,249 @@
1 /*
2 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
3 * All rights reserved. www.lanmedia.com
4 *
5 * This code is written by:
6 * Andrew Stanley-Jones (asj@cban.com)
7 * Rob Braun (bbraun@vix.com),
8 * Michael Graff (explorer@vix.com) and
9 * Matt Thomas (matt@3am-software.com).
10 *
11 * With Help By:
12 * David Boggs
13 * Ron Crane
14 * Allan Cox
15 *
16 * This software may be used and distributed according to the terms
17 * of the GNU General Public License version 2, incorporated herein by reference.
18 *
19 * Driver for the LanMedia LMC5200, LMC5245, LMC1000, LMC1200 cards.
20 */
21
22#include <linux/kernel.h>
23#include <linux/string.h>
24#include <linux/timer.h>
25#include <linux/ptrace.h>
26#include <linux/errno.h>
27#include <linux/ioport.h>
28#include <linux/slab.h>
29#include <linux/interrupt.h>
30#include <linux/pci.h>
31#include <linux/in.h>
32#include <linux/if_arp.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/inet.h>
37#include <linux/workqueue.h>
38#include <linux/proc_fs.h>
39#include <linux/bitops.h>
40
41#include <net/syncppp.h>
42
43#include <asm/processor.h> /* Processor type for cache alignment. */
44#include <asm/io.h>
45#include <asm/dma.h>
46#include <linux/smp.h>
47
48#include "lmc.h"
49#include "lmc_var.h"
50#include "lmc_debug.h"
51#include "lmc_ioctl.h"
52#include "lmc_proto.h"
53
54/*
55 * The compile-time variable SPPPSTUB causes the module to be
56 * compiled without referencing any of the sync ppp routines.
57 */
58#ifdef SPPPSTUB
59#define SPPP_detach(d) (void)0
60#define SPPP_open(d) 0
61#define SPPP_reopen(d) (void)0
62#define SPPP_close(d) (void)0
63#define SPPP_attach(d) (void)0
64#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP
65#else
66#define SPPP_attach(x) sppp_attach((x)->pd)
67#define SPPP_detach(x) sppp_detach((x)->pd->dev)
68#define SPPP_open(x) sppp_open((x)->pd->dev)
69#define SPPP_reopen(x) sppp_reopen((x)->pd->dev)
70#define SPPP_close(x) sppp_close((x)->pd->dev)
71#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z))
72#endif
73
74// init
75void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/
76{
77 lmc_trace(sc->lmc_device, "lmc_proto_init in");
78 switch(sc->if_type){
79 case LMC_PPP:
80 sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
81 if (!sc->pd) {
82            printk(KERN_WARNING "lmc_proto_init(): kmalloc failure!\n");
83 return;
84 }
85 sc->pd->dev = sc->lmc_device;
86 sc->if_ptr = sc->pd;
87 break;
88 case LMC_RAW:
89 break;
90 default:
91 break;
92 }
93 lmc_trace(sc->lmc_device, "lmc_proto_init out");
94}
95
96// attach
97void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
98{
99 lmc_trace(sc->lmc_device, "lmc_proto_attach in");
100 switch(sc->if_type){
101 case LMC_PPP:
102 {
103 struct net_device *dev = sc->lmc_device;
104 SPPP_attach(sc);
105 dev->do_ioctl = lmc_ioctl;
106 }
107 break;
108 case LMC_NET:
109 {
110 struct net_device *dev = sc->lmc_device;
111 /*
112             * Set a few basics here since this mode doesn't use sync_ppp
113 */
114 dev->flags |= IFF_POINTOPOINT;
115 dev->hard_header = NULL;
116 dev->hard_header_len = 0;
117 dev->addr_len = 0;
118 }
119 case LMC_RAW: /* Setup the task queue, maybe we should notify someone? */
120 {
121 }
122 default:
123 break;
124 }
125 lmc_trace(sc->lmc_device, "lmc_proto_attach out");
126}
127
128// detach
129void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/
130{
131 switch(sc->if_type){
132 case LMC_PPP:
133 SPPP_detach(sc);
134 break;
135 case LMC_RAW: /* Tell someone we're detaching? */
136 break;
137 default:
138 break;
139 }
140
141}
142
143// reopen
144void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/
145{
146 lmc_trace(sc->lmc_device, "lmc_proto_reopen in");
147 switch(sc->if_type){
148 case LMC_PPP:
149 SPPP_reopen(sc);
150 break;
151    case LMC_RAW: /* Reset the interface after being down, prepare to receive packets again */
152 break;
153 default:
154 break;
155 }
156 lmc_trace(sc->lmc_device, "lmc_proto_reopen out");
157}
158
159
160// ioctl
161int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/
162{
163    lmc_trace(sc->lmc_device, "lmc_proto_ioctl in");
164 switch(sc->if_type){
165 case LMC_PPP:
166 return SPPP_do_ioctl (sc, ifr, cmd);
167 break;
168 default:
169 return -EOPNOTSUPP;
170 break;
171 }
172 lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
173}
174
175// open
176void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/
177{
178 int ret;
179
180 lmc_trace(sc->lmc_device, "lmc_proto_open in");
181 switch(sc->if_type){
182 case LMC_PPP:
183 ret = SPPP_open(sc);
184 if(ret < 0)
185            printk(KERN_WARNING "%s: syncPPP open failed: %d\n", sc->name, ret);
186 break;
187 case LMC_RAW: /* We're about to start getting packets! */
188 break;
189 default:
190 break;
191 }
192 lmc_trace(sc->lmc_device, "lmc_proto_open out");
193}
194
195// close
196
197void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/
198{
199 lmc_trace(sc->lmc_device, "lmc_proto_close in");
200 switch(sc->if_type){
201 case LMC_PPP:
202 SPPP_close(sc);
203 break;
204 case LMC_RAW: /* Interface going down */
205 break;
206 default:
207 break;
208 }
209 lmc_trace(sc->lmc_device, "lmc_proto_close out");
210}
211
212unsigned short lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
213{
214 lmc_trace(sc->lmc_device, "lmc_proto_type in");
215 switch(sc->if_type){
216 case LMC_PPP:
217 return htons(ETH_P_WAN_PPP);
218 break;
219 case LMC_NET:
220 return htons(ETH_P_802_2);
221 break;
222 case LMC_RAW: /* Packet type for skbuff kind of useless */
223 return htons(ETH_P_802_2);
224 break;
225 default:
226 printk(KERN_WARNING "%s: No protocol set for this interface, assuming 802.2 (which is wrong!!)\n", sc->name);
227 return htons(ETH_P_802_2);
228 break;
229 }
230    lmc_trace(sc->lmc_device, "lmc_proto_type out");
231
232}
233
234void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
235{
236 lmc_trace(sc->lmc_device, "lmc_proto_netif in");
237 switch(sc->if_type){
238 case LMC_PPP:
239 case LMC_NET:
240 default:
241 skb->dev->last_rx = jiffies;
242 netif_rx(skb);
243 break;
244 case LMC_RAW:
245 break;
246 }
247 lmc_trace(sc->lmc_device, "lmc_proto_netif out");
248}
249
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
new file mode 100644
index 000000000000..080a55773349
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -0,0 +1,16 @@
1#ifndef _LMC_PROTO_H_
2#define _LMC_PROTO_H_
3
4void lmc_proto_init(lmc_softc_t *sc);
5void lmc_proto_attach(lmc_softc_t *sc);
6void lmc_proto_detach(lmc_softc_t *sc);
7void lmc_proto_reopen(lmc_softc_t *sc);
8int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
9void lmc_proto_open(lmc_softc_t *sc);
10void lmc_proto_close(lmc_softc_t *sc);
11unsigned short lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
12void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
13int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused);
14
15#endif
16
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
new file mode 100644
index 000000000000..6d003a39bfad
--- /dev/null
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -0,0 +1,570 @@
1#ifndef _LMC_VAR_H_
2#define _LMC_VAR_H_
3
4/* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */
5
6 /*
7 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
8 * All rights reserved. www.lanmedia.com
9 *
10 * This code is written by:
11 * Andrew Stanley-Jones (asj@cban.com)
12 * Rob Braun (bbraun@vix.com),
13 * Michael Graff (explorer@vix.com) and
14 * Matt Thomas (matt@3am-software.com).
15 *
16 * This software may be used and distributed according to the terms
17 * of the GNU General Public License version 2, incorporated herein by reference.
18 */
19
20#include <linux/timer.h>
21
22#ifndef __KERNEL__
23typedef signed char s8;
24typedef unsigned char u8;
25
26typedef signed short s16;
27typedef unsigned short u16;
28
29typedef signed int s32;
30typedef unsigned int u32;
31
32typedef signed long long s64;
33typedef unsigned long long u64;
34
35#define BITS_PER_LONG 32
36
37#endif
38
39/*
40 * basic definitions used in lmc include files
41 */
42
43typedef struct lmc___softc lmc_softc_t;
44typedef struct lmc___media lmc_media_t;
45typedef struct lmc___ctl lmc_ctl_t;
46
47#define lmc_csrptr_t unsigned long
48#define u_int16_t u16
49#define u_int8_t u8
50#define tulip_uint32_t u32
51
52#define LMC_REG_RANGE 0x80
53
54#define LMC_PRINTF_FMT "%s"
55#define LMC_PRINTF_ARGS (sc->lmc_device->name)
56
57#define TX_TIMEOUT (2*HZ)
58
59#define LMC_TXDESCS 32
60#define LMC_RXDESCS 32
61
62#define LMC_LINK_UP 1
63#define LMC_LINK_DOWN 0
64
65/* These macros are for generic reads and writes to and from the DEC chip */
66#define LMC_CSR_READ(sc, csr) \
67 inl((sc)->lmc_csrs.csr)
68#define LMC_CSR_WRITE(sc, reg, val) \
69 outl((val), (sc)->lmc_csrs.reg)
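/* e.g. LMC_CSR_WRITE(sc, csr_gp, sc->lmc_gpio) expands to an outl() of the
 * GPIO value to the csr_gp (CSR12) I/O port. */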
70
71//#ifdef _LINUX_DELAY_H
72// #define SLOW_DOWN_IO udelay(2);
73// #undef __SLOW_DOWN_IO
74// #define __SLOW_DOWN_IO udelay(2);
75//#endif
76
77#define DELAY(n) SLOW_DOWN_IO
78
79#define lmc_delay() inl(sc->lmc_csrs.csr_9)
80
81/* This macro sync's up with the mii so that reads and writes can take place */
82#define LMC_MII_SYNC(sc) do {int n=32; while( n >= 0 ) { \
83 LMC_CSR_WRITE((sc), csr_9, 0x20000); \
84 lmc_delay(); \
85 LMC_CSR_WRITE((sc), csr_9, 0x30000); \
86 lmc_delay(); \
87 n--; }} while(0)
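/* The loop above runs 33 times (n counts 32 down to 0), toggling the MII
 * management clock bit in CSR9 -- i.e. at least the 32-cycle preamble
 * required before each MII management transaction. */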
88
89struct lmc_regfile_t {
90 lmc_csrptr_t csr_busmode; /* CSR0 */
91 lmc_csrptr_t csr_txpoll; /* CSR1 */
92 lmc_csrptr_t csr_rxpoll; /* CSR2 */
93 lmc_csrptr_t csr_rxlist; /* CSR3 */
94 lmc_csrptr_t csr_txlist; /* CSR4 */
95 lmc_csrptr_t csr_status; /* CSR5 */
96 lmc_csrptr_t csr_command; /* CSR6 */
97 lmc_csrptr_t csr_intr; /* CSR7 */
98 lmc_csrptr_t csr_missed_frames; /* CSR8 */
99 lmc_csrptr_t csr_9; /* CSR9 */
100 lmc_csrptr_t csr_10; /* CSR10 */
101 lmc_csrptr_t csr_11; /* CSR11 */
102 lmc_csrptr_t csr_12; /* CSR12 */
103 lmc_csrptr_t csr_13; /* CSR13 */
104 lmc_csrptr_t csr_14; /* CSR14 */
105 lmc_csrptr_t csr_15; /* CSR15 */
106};
107
108#define csr_enetrom csr_9 /* 21040 */
109#define csr_reserved csr_10 /* 21040 */
110#define csr_full_duplex csr_11 /* 21040 */
111#define csr_bootrom csr_10 /* 21041/21140A/?? */
112#define csr_gp csr_12 /* 21140* */
113#define csr_watchdog csr_15 /* 21140* */
114#define csr_gp_timer csr_11 /* 21041/21140* */
115#define csr_srom_mii csr_9 /* 21041/21140* */
116#define csr_sia_status csr_12 /* 2104x */
117#define csr_sia_connectivity csr_13 /* 2104x */
118#define csr_sia_tx_rx csr_14 /* 2104x */
119#define csr_sia_general csr_15 /* 2104x */
120
121/* tulip length/control transmit descriptor definitions
122 * used to define bits in the second tulip_desc_t field (length)
123 * for the transmit descriptor -baz */
124
125#define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF))
126#define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800))
127#define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000))
128#define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000))
129#define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000))
130#define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000))
131#define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000))
132#define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000))
133#define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000))
134#define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000))
135#define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000))
136#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000))
137
138#define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11
139#define TDES_COLLISION_COUNT_BIT_NUMBER 3
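/* For example, a driver would typically build the length word of a
 * single-buffer transmit descriptor as something like
 *   len | LMC_TDES_FIRST_SEGMENT | LMC_TDES_LAST_SEGMENT
 *       | LMC_TDES_INTERRUPT_ON_COMPLETION
 * with the buffer length living in the low 11 bits
 * (LMC_TDES_FIRST_BUFFER_SIZE). */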
140
141/* Constants for the RCV descriptor RDES */
142
143#define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001))
144#define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002))
145#define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004))
146#define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008))
147#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010))
148#define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020))
149#define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040))
150#define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080))
151#define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100))
152#define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200))
153#define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400))
154#define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800))
155#define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000))
156#define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000))
157#define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000))
158#define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000))
159#define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000))
160
161#define RDES_FRAME_LENGTH_BIT_NUMBER 16
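/* e.g. the length of a received frame can be recovered from the status
 * word as (status & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER */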
162
163#define LMC_RDES_ERROR_MASK ( (u_int32_t)( \
164 LMC_RDES_OVERFLOW \
165 | LMC_RDES_DRIBBLING_BIT \
166 | LMC_RDES_REPORT_ON_MII_ERR \
167 | LMC_RDES_COLLISION_SEEN ) )
168
169
170/*
171 * Ioctl info
172 */
173
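/* AV9110 frequency synthesizer settings: n/m/v/x/r are the divider values
 * shifted out by write_av9110() in lmc_media.c, and f is the resulting
 * clock rate in Hz (see lmc_ssi_set_speed()). */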
174typedef struct {
175 u_int32_t n;
176 u_int32_t m;
177 u_int32_t v;
178 u_int32_t x;
179 u_int32_t r;
180 u_int32_t f;
181 u_int32_t exact;
182} lmc_av9110_t;
183
184/*
185 * Common structure passed to the ioctl code.
186 */
187struct lmc___ctl {
188 u_int32_t cardtype;
189 u_int32_t clock_source; /* HSSI, T1 */
190 u_int32_t clock_rate; /* T1 */
191 u_int32_t crc_length;
192 u_int32_t cable_length; /* DS3 */
193 u_int32_t scrambler_onoff; /* DS3 */
194 u_int32_t cable_type; /* T1 */
195 u_int32_t keepalive_onoff; /* protocol */
196 u_int32_t ticks; /* ticks/sec */
197 union {
198 lmc_av9110_t ssi;
199 } cardspec;
200 u_int32_t circuit_type; /* T1 or E1 */
201};
202
203
204/*
205 * Careful, look at the data sheet, there's more to this
206 * structure than meets the eye. It should probably be:
207 *
208 * struct tulip_desc_t {
209 * u8 own:1;
210 * u32 status:31;
211 * u32 control:10;
212 * u32 buffer1;
213 * u32 buffer2;
214 * };
215 * You could also expand status control to provide more bit information
216 */
217
218struct tulip_desc_t {
219 s32 status;
220 s32 length;
221 u32 buffer1;
222 u32 buffer2;
223};
224
225/*
226 * media independent methods to check on media status, link, light LEDs,
227 * etc.
228 */
229struct lmc___media {
230 void (* init)(lmc_softc_t * const);
231 void (* defaults)(lmc_softc_t * const);
232 void (* set_status)(lmc_softc_t * const, lmc_ctl_t *);
233 void (* set_clock_source)(lmc_softc_t * const, int);
234 void (* set_speed)(lmc_softc_t * const, lmc_ctl_t *);
235 void (* set_cable_length)(lmc_softc_t * const, int);
236 void (* set_scrambler)(lmc_softc_t * const, int);
237 int (* get_link_status)(lmc_softc_t * const);
238 void (* set_link_status)(lmc_softc_t * const, int);
239 void (* set_crc_length)(lmc_softc_t * const, int);
240 void (* set_circuit_type)(lmc_softc_t * const, int);
241 void (* watchdog)(lmc_softc_t * const);
242};
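/* Instantiated by the lmc_ds3_media, lmc_hssi_media, lmc_ssi_media and
 * lmc_t1_media method tables in lmc_media.c. */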
243
244
245#define STATCHECK 0xBEEFCAFE
246
247/* Included in this structure are first
248 * - standard net_device_stats
249 * - some other counters used for debug and driver performance
250 * evaluation -baz
251 */
252struct lmc_statistics
253{
254 unsigned long rx_packets; /* total packets received */
255 unsigned long tx_packets; /* total packets transmitted */
256 unsigned long rx_bytes;
257 unsigned long tx_bytes;
258
259 unsigned long rx_errors; /* bad packets received */
260 unsigned long tx_errors; /* packet transmit problems */
261 unsigned long rx_dropped; /* no space in linux buffers */
262 unsigned long tx_dropped; /* no space available in linux */
263 unsigned long multicast; /* multicast packets received */
264 unsigned long collisions;
265
266 /* detailed rx_errors: */
267 unsigned long rx_length_errors;
268 unsigned long rx_over_errors; /* receiver ring buff overflow */
269 unsigned long rx_crc_errors; /* recved pkt with crc error */
270 unsigned long rx_frame_errors; /* recv'd frame alignment error */
271 unsigned long rx_fifo_errors; /* recv'r fifo overrun */
272 unsigned long rx_missed_errors; /* receiver missed packet */
273
274 /* detailed tx_errors */
275 unsigned long tx_aborted_errors;
276 unsigned long tx_carrier_errors;
277 unsigned long tx_fifo_errors;
278 unsigned long tx_heartbeat_errors;
279 unsigned long tx_window_errors;
280
281 /* for cslip etc */
282 unsigned long rx_compressed;
283 unsigned long tx_compressed;
284
285 /* -------------------------------------
286 * Custom stats & counters follow -baz */
287 u_int32_t version_size;
288 u_int32_t lmc_cardtype;
289
290 u_int32_t tx_ProcTimeout;
291 u_int32_t tx_IntTimeout;
292 u_int32_t tx_NoCompleteCnt;
293 u_int32_t tx_MaxXmtsB4Int;
294 u_int32_t tx_TimeoutCnt;
295 u_int32_t tx_OutOfSyncPtr;
296 u_int32_t tx_tbusy0;
297 u_int32_t tx_tbusy1;
298 u_int32_t tx_tbusy_calls;
299 u_int32_t resetCount;
300 u_int32_t lmc_txfull;
301 u_int32_t tbusy;
302 u_int32_t dirtyTx;
303 u_int32_t lmc_next_tx;
304 u_int32_t otherTypeCnt;
305 u_int32_t lastType;
306 u_int32_t lastTypeOK;
307 u_int32_t txLoopCnt;
308 u_int32_t usedXmtDescripCnt;
309 u_int32_t txIndexCnt;
310 u_int32_t rxIntLoopCnt;
311
312 u_int32_t rx_SmallPktCnt;
313 u_int32_t rx_BadPktSurgeCnt;
314 u_int32_t rx_BuffAllocErr;
315 u_int32_t tx_lossOfClockCnt;
316
317 /* T1 error counters */
318 u_int32_t framingBitErrorCount;
319 u_int32_t lineCodeViolationCount;
320
321 u_int32_t lossOfFrameCount;
322 u_int32_t changeOfFrameAlignmentCount;
323 u_int32_t severelyErroredFrameCount;
324
325 u_int32_t check;
326};
327
328
329typedef struct lmc_xinfo {
330 u_int32_t Magic0; /* BEEFCAFE */
331
332 u_int32_t PciCardType;
333 u_int32_t PciSlotNumber; /* PCI slot number */
334
335 u_int16_t DriverMajorVersion;
336 u_int16_t DriverMinorVersion;
337 u_int16_t DriverSubVersion;
338
339 u_int16_t XilinxRevisionNumber;
340 u_int16_t MaxFrameSize;
341
342 u_int16_t t1_alarm1_status;
343 u_int16_t t1_alarm2_status;
344
345 int link_status;
346 u_int32_t mii_reg16;
347
348 u_int32_t Magic1; /* DEADBEEF */
349} LMC_XINFO;
350
351
352/*
353 * forward decl
354 */
355struct lmc___softc {
356 void *if_ptr; /* General purpose pointer (used by SPPP) */
357 char *name;
358 u8 board_idx;
359 struct lmc_statistics stats;
360 struct net_device *lmc_device;
361
362 int hang, rxdesc, bad_packet, some_counter;
363 u_int32_t txgo;
364 struct lmc_regfile_t lmc_csrs;
365 volatile u_int32_t lmc_txtick;
366 volatile u_int32_t lmc_rxtick;
367 u_int32_t lmc_flags;
368 u_int32_t lmc_intrmask; /* our copy of csr_intr */
369 u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */
370 u_int32_t lmc_busmode; /* our copy of csr_busmode */
371 u_int32_t lmc_gpio_io; /* state of in/out settings */
372 u_int32_t lmc_gpio; /* state of outputs */
373 struct sk_buff* lmc_txq[LMC_TXDESCS];
374 struct sk_buff* lmc_rxq[LMC_RXDESCS];
375 volatile
376 struct tulip_desc_t lmc_rxring[LMC_RXDESCS];
377 volatile
378 struct tulip_desc_t lmc_txring[LMC_TXDESCS];
379 unsigned int lmc_next_rx, lmc_next_tx;
380 volatile
381 unsigned int lmc_taint_tx, lmc_taint_rx;
382 int lmc_tx_start, lmc_txfull;
383 int lmc_txbusy;
384 u_int16_t lmc_miireg16;
385 int lmc_ok;
386 int last_link_status;
387 int lmc_cardtype;
388 u_int32_t last_frameerr;
389 lmc_media_t *lmc_media;
390 struct timer_list timer;
391 lmc_ctl_t ictl;
392 u_int32_t TxDescriptControlInit;
393
394 int tx_TimeoutInd; /* additional driver state */
395 int tx_TimeoutDisplay;
396 unsigned int lastlmc_taint_tx;
397 int lasttx_packets;
398 u_int32_t tx_clockState;
399 u_int32_t lmc_crcSize;
400 LMC_XINFO lmc_xinfo;
401 char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */
402 char lmc_timing; /* for HSSI and SSI */
403 int got_irq;
404
405 char last_led_err[4];
406
407 u32 last_int;
408 u32 num_int;
409
410 spinlock_t lmc_lock;
411 u_int16_t if_type; /* PPP or NET */
412 struct ppp_device *pd;
413
414 /* Failure cases */
415 u8 failed_ring;
416 u8 failed_recv_alloc;
417
418 /* Structure check */
419 u32 check;
420};
421
422#define LMC_PCI_TIME 1
423#define LMC_EXT_TIME 0
424
425#define PKT_BUF_SZ 1542 /* was 1536 */
426
427/* CSR5 settings */
428#define TIMER_INT 0x00000800
429#define TP_LINK_FAIL 0x00001000
430#define TP_LINK_PASS 0x00000010
431#define NORMAL_INT 0x00010000
432#define ABNORMAL_INT 0x00008000
433#define RX_JABBER_INT 0x00000200
434#define RX_DIED 0x00000100
435#define RX_NOBUFF 0x00000080
436#define RX_INT 0x00000040
437#define TX_FIFO_UNDER 0x00000020
438#define TX_JABBER 0x00000008
439#define TX_NOBUFF 0x00000004
440#define TX_DIED 0x00000002
441#define TX_INT 0x00000001
442
443/* CSR6 settings */
444#define OPERATION_MODE 0x00000200 /* Full Duplex */
445#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */
446#define RECIEVE_ALL          0x40000000 /* Receive All */
447#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */
448
449/* Dec control registers CSR6 as well */
450#define LMC_DEC_ST 0x00002000
451#define LMC_DEC_SR 0x00000002
452
453/* CSR15 settings */
454#define RECV_WATCHDOG_DISABLE 0x00000010
455#define JABBER_DISABLE 0x00000001
456
457/* More settings */
458/*
459 * CSR6 -- Command (Operation Mode) Register
460 */
461#define TULIP_CMD_RECEIVEALL	0x40000000L /* (RW)  Receive all frames? */
462#define TULIP_CMD_MUSTBEONE 0x02000000L /* (RW) Must Be One (21140) */
463#define TULIP_CMD_TXTHRSHLDCTL 0x00400000L /* (RW) Transmit Threshold Mode (21140) */
464#define TULIP_CMD_STOREFWD	0x00200000L /* (RW)  Store and Forward (21140) */
465#define TULIP_CMD_NOHEARTBEAT 0x00080000L /* (RW) No Heartbeat (21140) */
466#define TULIP_CMD_PORTSELECT	0x00040000L /* (RW)  Port Select (100Mb) (21140) */
467#define TULIP_CMD_FULLDUPLEX 0x00000200L /* (RW) Full Duplex Mode */
468#define TULIP_CMD_OPERMODE 0x00000C00L /* (RW) Operating Mode */
469#define TULIP_CMD_PROMISCUOUS 0x00000041L /* (RW) Promiscuous Mode */
470#define TULIP_CMD_PASSBADPKT 0x00000008L /* (RW) Pass Bad Frames */
471#define TULIP_CMD_THRESHOLDCTL 0x0000C000L /* (RW) Threshold Control */
472
473#define TULIP_GP_PINSET 0x00000100L
474#define TULIP_BUSMODE_SWRESET 0x00000001L
475#define TULIP_WATCHDOG_TXDISABLE 0x00000001L
476#define TULIP_WATCHDOG_RXDISABLE 0x00000010L
477
478#define TULIP_STS_NORMALINTR 0x00010000L /* (RW) Normal Interrupt */
479#define TULIP_STS_ABNRMLINTR 0x00008000L /* (RW) Abnormal Interrupt */
480#define TULIP_STS_ERI 0x00004000L /* (RW) Early Receive Interrupt */
481#define TULIP_STS_SYSERROR 0x00002000L /* (RW) System Error */
482#define TULIP_STS_GTE		0x00000800L /* (RW)  General Purpose Timer Exp */
483#define TULIP_STS_ETI 0x00000400L /* (RW) Early Transmit Interrupt */
484#define TULIP_STS_RXWT 0x00000200L /* (RW) Receiver Watchdog Timeout */
485#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receiver Process Stopped */
486#define TULIP_STS_RXNOBUF 0x00000080L /* (RW) Receive Buf Unavail */
487#define TULIP_STS_RXINTR 0x00000040L /* (RW) Receive Interrupt */
488#define TULIP_STS_TXUNDERFLOW 0x00000020L /* (RW) Transmit Underflow */
489#define TULIP_STS_TXJABER 0x00000008L /* (RW) Jabber timeout */
490#define TULIP_STS_TXNOBUF 0x00000004L
491#define TULIP_STS_TXSTOPPED 0x00000002L /* (RW) Transmit Process Stopped */
492#define TULIP_STS_TXINTR 0x00000001L /* (RW) Transmit Interrupt */
493
494#define TULIP_STS_RXS_STOPPED 0x00000000L /* 000 - Stopped */
495
496#define TULIP_STS_RXSTOPPED 0x00000100L /* (RW) Receive Process Stopped */
497#define TULIP_STS_RXNOBUF 0x00000080L
498
499#define TULIP_CMD_TXRUN 0x00002000L /* (RW) Start/Stop Transmitter */
500#define TULIP_CMD_RXRUN 0x00000002L /* (RW) Start/Stop Receive Filtering */
501#define TULIP_DSTS_TxDEFERRED 0x00000001 /* Initially Deferred */
502#define TULIP_DSTS_OWNER 0x80000000 /* Owner (1 = 21040) */
503#define TULIP_DSTS_RxMIIERR 0x00000008
504#define LMC_DSTS_ERRSUM (TULIP_DSTS_RxMIIERR)
505
506#define TULIP_DEFAULT_INTR_MASK (TULIP_STS_NORMALINTR \
507 | TULIP_STS_RXINTR \
508 | TULIP_STS_TXINTR \
509 | TULIP_STS_ABNRMLINTR \
510 | TULIP_STS_SYSERROR \
511 | TULIP_STS_TXSTOPPED \
512 | TULIP_STS_TXUNDERFLOW\
513 | TULIP_STS_RXSTOPPED )
514
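/*
 * Usage sketch (hypothetical helper names, for illustration only): a driver
 * built on these definitions would typically write the mask to CSR7 to
 * enable these interrupt sources and test CSR5 against the same bits in
 * its interrupt handler:
 *
 *	write_csr(sc, 7, TULIP_DEFAULT_INTR_MASK);	-- enable sources
 *	csr5 = read_csr(sc, 5);				-- read current status
 *	if (!(csr5 & TULIP_DEFAULT_INTR_MASK))
 *		return;					-- not our interrupt
 */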
515#define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000))
516#define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000))
517
518#ifndef TULIP_CMD_RECEIVEALL
519#define TULIP_CMD_RECEIVEALL 0x40000000L
520#endif
521
522/* Adapter module number */
523#define LMC_ADAP_HSSI 2
524#define LMC_ADAP_DS3 3
525#define LMC_ADAP_SSI 4
526#define LMC_ADAP_T1 5
527
528#define HDLC_HDR_LEN 4
529#define HDLC_ADDR_LEN 1
530#define HDLC_SLARP 0x8035
531#define LMC_MTU 1500
532#define SLARP_LINECHECK 2
533
534#define LMC_CRC_LEN_16 2 /* 16-bit CRC */
535#define LMC_CRC_LEN_32 4
536
537#ifdef LMC_HDLC
538/* definition of an hdlc header. */
539struct hdlc_hdr
540{
541 u8 address;
542 u8 control;
543 u16 type;
544};
545
546/* definition of a slarp header. */
547struct slarp
548{
549 long code;
550 union sl
551 {
552 struct
553 {
554 ulong address;
555 ulong mask;
556 ushort unused;
557 } add;
558 struct
559 {
560 ulong mysequence;
561 ulong yoursequence;
562 ushort reliability;
563 ulong time;
564 } chk;
565 } t;
566};
567#endif /* LMC_HDLC */
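/*
 * Illustrative sketch only (LMC_HDLC is normally left undefined, so the
 * structures above are not compiled in): a Cisco-HDLC SLARP keepalive
 * would be assembled roughly as
 *
 *	struct hdlc_hdr hdr = {
 *		.address = 0x8f,		-- Cisco-HDLC broadcast address
 *		.control = 0,
 *		.type    = htons(HDLC_SLARP),	-- 0x8035
 *	};
 *	struct slarp sl = { .code = htonl(SLARP_LINECHECK) };
 *	sl.t.chk.mysequence = htonl(my_seq);	-- my_seq: local keepalive counter
 *
 * "my_seq" is a hypothetical local variable, shown only to indicate intent.
 */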
568
569
570#endif /* _LMC_VAR_H_ */
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
new file mode 100644
index 000000000000..cd32751b64eb
--- /dev/null
+++ b/drivers/net/wan/n2.c
@@ -0,0 +1,562 @@
1/*
2 * SDL Inc. RISCom/N2 synchronous serial card driver for Linux
3 *
4 * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * For information see http://hq.pm.waw.pl/hdlc/
11 *
12 * Note: integrated CSU/DSU/DDS are not supported by this driver
13 *
14 * Sources of information:
15 * Hitachi HD64570 SCA User's Manual
16 * SDL Inc. PPP/HDLC/CISCO driver
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/slab.h>
22#include <linux/types.h>
23#include <linux/fcntl.h>
24#include <linux/in.h>
25#include <linux/string.h>
26#include <linux/errno.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/moduleparam.h>
30#include <linux/netdevice.h>
31#include <linux/hdlc.h>
32#include <asm/io.h>
33#include "hd64570.h"
34
35
36static const char* version = "SDL RISCom/N2 driver version: 1.15";
37static const char* devname = "RISCom/N2";
38
39#undef DEBUG_PKT
40#define DEBUG_RINGS
41
42#define USE_WINDOWSIZE 16384
43#define USE_BUS16BITS 1
44#define CLOCK_BASE 9830400 /* 9.8304 MHz */
45#define MAX_PAGES 16 /* 16 RAM pages at max */
46#define MAX_RAM_SIZE 0x80000 /* 512 KB */
47#if MAX_RAM_SIZE > MAX_PAGES * USE_WINDOWSIZE
48#undef MAX_RAM_SIZE
49#define MAX_RAM_SIZE (MAX_PAGES * USE_WINDOWSIZE)
50#endif
51#define N2_IOPORTS 0x10
52#define NEED_DETECT_RAM
53#define NEED_SCA_MSCI_INTR
54#define MAX_TX_BUFFERS 10
55
56static char *hw = NULL; /* pointer to hw=xxx command line string */
57
58/* RISCom/N2 Board Registers */
59
60/* PC Control Register */
61#define N2_PCR 0
62#define PCR_RUNSCA 1 /* Run 64570 */
63#define PCR_VPM 2 /* Enable VPM - needed if using RAM above 1 MB */
64#define PCR_ENWIN 4 /* Open window */
65#define PCR_BUS16 8 /* 16-bit bus */
66
67
68/* Memory Base Address Register */
69#define N2_BAR 2
70
71
72/* Page Scan Register */
73#define N2_PSR 4
74#define WIN16K 0x00
75#define WIN32K 0x20
76#define WIN64K 0x40
77#define PSR_WINBITS 0x60
78#define PSR_DMAEN 0x80
79#define PSR_PAGEBITS 0x0F
80
81
82/* Modem Control Reg */
83#define N2_MCR 6
84#define CLOCK_OUT_PORT1 0x80
85#define CLOCK_OUT_PORT0 0x40
86#define TX422_PORT1 0x20
87#define TX422_PORT0 0x10
88#define DSR_PORT1 0x08
89#define DSR_PORT0 0x04
90#define DTR_PORT1 0x02
91#define DTR_PORT0 0x01
92
93
94typedef struct port_s {
95 struct net_device *dev;
96 struct card_s *card;
97 spinlock_t lock; /* TX lock */
98 sync_serial_settings settings;
99 int valid; /* port enabled */
100 int rxpart; /* partial frame received, next frame invalid*/
101 unsigned short encoding;
102 unsigned short parity;
103 u16 rxin; /* rx ring buffer 'in' pointer */
104 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
105 u16 txlast;
106 u8 rxs, txs, tmc; /* SCA registers */
107 u8 phy_node; /* physical port # - 0 or 1 */
108 u8 log_node; /* logical port # */
109}port_t;
110
111
112
113typedef struct card_s {
114 u8 __iomem *winbase; /* ISA window base address */
115 u32 phy_winbase; /* ISA physical base address */
116 u32 ram_size; /* number of bytes */
117 u16 io; /* IO Base address */
118 u16 buff_offset; /* offset of first buffer of first channel */
119 u16 rx_ring_buffers; /* number of buffers in a ring */
120 u16 tx_ring_buffers;
121 u8 irq; /* IRQ (3-15) */
122
123 port_t ports[2];
124 struct card_s *next_card;
125}card_t;
126
127
128static card_t *first_card;
129static card_t **new_card = &first_card;
130
131
132#define sca_reg(reg, card) (0x8000 | (card)->io | \
133 ((reg) & 0x0F) | (((reg) & 0xF0) << 6))
134#define sca_in(reg, card) inb(sca_reg(reg, card))
135#define sca_out(value, reg, card) outb(value, sca_reg(reg, card))
136#define sca_inw(reg, card) inw(sca_reg(reg, card))
137#define sca_outw(value, reg, card) outw(value, sca_reg(reg, card))
138
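/*
 * Example (hypothetical values): for a card at io = 0x300, SCA register
 * offset 0x24 maps to
 *	sca_reg(0x24, card) = 0x8000 | 0x300 | 0x04 | (0x20 << 6) = 0x8b04
 * so sca_in(0x24, card) expands to inb(0x8b04).
 */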
139#define port_to_card(port) ((port)->card)
140#define log_node(port) ((port)->log_node)
141#define phy_node(port) ((port)->phy_node)
142#define winsize(card) (USE_WINDOWSIZE)
143#define winbase(card) ((card)->winbase)
144#define get_port(card, port) ((card)->ports[port].valid ? \
145 &(card)->ports[port] : NULL)
146
147
148
149static __inline__ u8 sca_get_page(card_t *card)
150{
151 return inb(card->io + N2_PSR) & PSR_PAGEBITS;
152}
153
154
155static __inline__ void openwin(card_t *card, u8 page)
156{
157 u8 psr = inb(card->io + N2_PSR);
158 outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR);
159}
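/*
 * Example: openwin(card, 3) selects the fourth 16 KB page of the card's
 * RAM, which then appears at card->winbase; the shared hd6457x.c code
 * included below relies on this paging when a ring does not fit inside
 * a single window.
 */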
160
161
162
163#include "hd6457x.c"
164
165
166
167static void n2_set_iface(port_t *port)
168{
169 card_t *card = port->card;
170 int io = card->io;
171 u8 mcr = inb(io + N2_MCR);
172 u8 msci = get_msci(port);
173 u8 rxs = port->rxs & CLK_BRG_MASK;
174 u8 txs = port->txs & CLK_BRG_MASK;
175
176 switch(port->settings.clock_type) {
177 case CLOCK_INT:
178 mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
179 rxs |= CLK_BRG_RX; /* BRG output */
180 txs |= CLK_RXCLK_TX; /* RX clock */
181 break;
182
183 case CLOCK_TXINT:
184 mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
185 rxs |= CLK_LINE_RX; /* RXC input */
186 txs |= CLK_BRG_TX; /* BRG output */
187 break;
188
189 case CLOCK_TXFROMRX:
190 mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
191 rxs |= CLK_LINE_RX; /* RXC input */
192 txs |= CLK_RXCLK_TX; /* RX clock */
193 break;
194
195 default: /* Clock EXTernal */
196 mcr &= port->phy_node ? ~CLOCK_OUT_PORT1 : ~CLOCK_OUT_PORT0;
197 rxs |= CLK_LINE_RX; /* RXC input */
198 txs |= CLK_LINE_TX; /* TXC input */
199 }
200
201 outb(mcr, io + N2_MCR);
202 port->rxs = rxs;
203 port->txs = txs;
204 sca_out(rxs, msci + RXS, card);
205 sca_out(txs, msci + TXS, card);
206 sca_set_port(port);
207}
208
209
210
211static int n2_open(struct net_device *dev)
212{
213 port_t *port = dev_to_port(dev);
214 int io = port->card->io;
215 u8 mcr = inb(io + N2_MCR) | (port->phy_node ? TX422_PORT1:TX422_PORT0);
216 int result;
217
218 result = hdlc_open(dev);
219 if (result)
220 return result;
221
222 mcr &= port->phy_node ? ~DTR_PORT1 : ~DTR_PORT0; /* set DTR ON */
223 outb(mcr, io + N2_MCR);
224
225 outb(inb(io + N2_PCR) | PCR_ENWIN, io + N2_PCR); /* open window */
226 outb(inb(io + N2_PSR) | PSR_DMAEN, io + N2_PSR); /* enable dma */
227 sca_open(dev);
228 n2_set_iface(port);
229 return 0;
230}
231
232
233
234static int n2_close(struct net_device *dev)
235{
236 port_t *port = dev_to_port(dev);
237 int io = port->card->io;
238 u8 mcr = inb(io+N2_MCR) | (port->phy_node ? TX422_PORT1 : TX422_PORT0);
239
240 sca_close(dev);
241 mcr |= port->phy_node ? DTR_PORT1 : DTR_PORT0; /* set DTR OFF */
242 outb(mcr, io + N2_MCR);
243 hdlc_close(dev);
244 return 0;
245}
246
247
248
249static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
250{
251 const size_t size = sizeof(sync_serial_settings);
252 sync_serial_settings new_line;
253 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
254 port_t *port = dev_to_port(dev);
255
256#ifdef DEBUG_RINGS
257 if (cmd == SIOCDEVPRIVATE) {
258 sca_dump_rings(dev);
259 return 0;
260 }
261#endif
262 if (cmd != SIOCWANDEV)
263 return hdlc_ioctl(dev, ifr, cmd);
264
265 switch(ifr->ifr_settings.type) {
266 case IF_GET_IFACE:
267 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
268 if (ifr->ifr_settings.size < size) {
269 ifr->ifr_settings.size = size; /* data size wanted */
270 return -ENOBUFS;
271 }
272 if (copy_to_user(line, &port->settings, size))
273 return -EFAULT;
274 return 0;
275
276 case IF_IFACE_SYNC_SERIAL:
277 if(!capable(CAP_NET_ADMIN))
278 return -EPERM;
279
280 if (copy_from_user(&new_line, line, size))
281 return -EFAULT;
282
283 if (new_line.clock_type != CLOCK_EXT &&
284 new_line.clock_type != CLOCK_TXFROMRX &&
285 new_line.clock_type != CLOCK_INT &&
286 new_line.clock_type != CLOCK_TXINT)
287 return -EINVAL; /* No such clock setting */
288
289 if (new_line.loopback != 0 && new_line.loopback != 1)
290 return -EINVAL;
291
292 memcpy(&port->settings, &new_line, size); /* Update settings */
293 n2_set_iface(port);
294 return 0;
295
296 default:
297 return hdlc_ioctl(dev, ifr, cmd);
298 }
299}
300
301
302
303static void n2_destroy_card(card_t *card)
304{
305 int cnt;
306
307 for (cnt = 0; cnt < 2; cnt++)
308 if (card->ports[cnt].card) {
309 struct net_device *dev = port_to_dev(&card->ports[cnt]);
310 unregister_hdlc_device(dev);
311 }
312
313 if (card->irq)
314 free_irq(card->irq, card);
315
316 if (card->winbase) {
317 iounmap(card->winbase);
318 release_mem_region(card->phy_winbase, USE_WINDOWSIZE);
319 }
320
321 if (card->io)
322 release_region(card->io, N2_IOPORTS);
323 if (card->ports[0].dev)
324 free_netdev(card->ports[0].dev);
325 if (card->ports[1].dev)
326 free_netdev(card->ports[1].dev);
327 kfree(card);
328}
329
330
331
332static int __init n2_run(unsigned long io, unsigned long irq,
333 unsigned long winbase, long valid0, long valid1)
334{
335 card_t *card;
336 u8 cnt, pcr;
337 int i;
338
339 if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) {
340 printk(KERN_ERR "n2: invalid I/O port value\n");
341 return -ENODEV;
342 }
343
344 if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
345 printk(KERN_ERR "n2: invalid IRQ value\n");
346 return -ENODEV;
347 }
348
349 if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) {
350 printk(KERN_ERR "n2: invalid RAM value\n");
351 return -ENODEV;
352 }
353
354 card = kmalloc(sizeof(card_t), GFP_KERNEL);
355 if (card == NULL) {
356 printk(KERN_ERR "n2: unable to allocate memory\n");
357 return -ENOBUFS;
358 }
359 memset(card, 0, sizeof(card_t));
360
361 card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
362 card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
363 if (!card->ports[0].dev || !card->ports[1].dev) {
364 printk(KERN_ERR "n2: unable to allocate memory\n");
365 n2_destroy_card(card);
366 return -ENOMEM;
367 }
368
369 if (!request_region(io, N2_IOPORTS, devname)) {
370 printk(KERN_ERR "n2: I/O port region in use\n");
371 n2_destroy_card(card);
372 return -EBUSY;
373 }
374 card->io = io;
375
376 if (request_irq(irq, &sca_intr, 0, devname, card)) {
377 printk(KERN_ERR "n2: could not allocate IRQ\n");
378 n2_destroy_card(card);
379 return(-EBUSY);
380 }
381 card->irq = irq;
382
383 if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
384 printk(KERN_ERR "n2: could not request RAM window\n");
385 n2_destroy_card(card);
386 return(-EBUSY);
387 }
388 card->phy_winbase = winbase;
389 card->winbase = ioremap(winbase, USE_WINDOWSIZE);
390
391 outb(0, io + N2_PCR);
392 outb(winbase >> 12, io + N2_BAR);
393
394 switch (USE_WINDOWSIZE) {
395 case 16384:
396 outb(WIN16K, io + N2_PSR);
397 break;
398
399 case 32768:
400 outb(WIN32K, io + N2_PSR);
401 break;
402
403 case 65536:
404 outb(WIN64K, io + N2_PSR);
405 break;
406
407 default:
408 printk(KERN_ERR "n2: invalid window size\n");
409 n2_destroy_card(card);
410 return -ENODEV;
411 }
412
413 pcr = PCR_ENWIN | PCR_VPM | (USE_BUS16BITS ? PCR_BUS16 : 0);
414 outb(pcr, io + N2_PCR);
415
416 card->ram_size = sca_detect_ram(card, card->winbase, MAX_RAM_SIZE);
417
418 /* number of TX + RX buffers for one port */
419 i = card->ram_size / ((valid0 + valid1) * (sizeof(pkt_desc) +
420 HDLC_MAX_MRU));
421
422 card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
423 card->rx_ring_buffers = i - card->tx_ring_buffers;
424
425 card->buff_offset = (valid0 + valid1) * sizeof(pkt_desc) *
426 (card->tx_ring_buffers + card->rx_ring_buffers);
427
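	/*
	 * Worked example (hypothetical figures): with both ports enabled,
	 * 128 KB of detected RAM, HDLC_MAX_MRU == 1600 and a 16-byte
	 * pkt_desc, i = 131072 / (2 * 1616) = 40, giving 10 TX buffers
	 * (capped by MAX_TX_BUFFERS) and 30 RX buffers per port.
	 */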
428 printk(KERN_INFO "n2: RISCom/N2 %u KB RAM, IRQ%u, "
429 "using %u TX + %u RX packets rings\n", card->ram_size / 1024,
430 card->irq, card->tx_ring_buffers, card->rx_ring_buffers);
431
432 if (card->tx_ring_buffers < 1) {
433 printk(KERN_ERR "n2: RAM test failed\n");
434 n2_destroy_card(card);
435 return -EIO;
436 }
437
438 pcr |= PCR_RUNSCA; /* run SCA */
439 outb(pcr, io + N2_PCR);
440 outb(0, io + N2_MCR);
441
442 sca_init(card, 0);
443 for (cnt = 0; cnt < 2; cnt++) {
444 port_t *port = &card->ports[cnt];
445 struct net_device *dev = port_to_dev(port);
446 hdlc_device *hdlc = dev_to_hdlc(dev);
447
448 if ((cnt == 0 && !valid0) || (cnt == 1 && !valid1))
449 continue;
450
451 port->phy_node = cnt;
452 port->valid = 1;
453
454 if ((cnt == 1) && valid0)
455 port->log_node = 1;
456
457 spin_lock_init(&port->lock);
458 SET_MODULE_OWNER(dev);
459 dev->irq = irq;
460 dev->mem_start = winbase;
461 dev->mem_end = winbase + USE_WINDOWSIZE - 1;
462 dev->tx_queue_len = 50;
463 dev->do_ioctl = n2_ioctl;
464 dev->open = n2_open;
465 dev->stop = n2_close;
466 hdlc->attach = sca_attach;
467 hdlc->xmit = sca_xmit;
468 port->settings.clock_type = CLOCK_EXT;
469 port->card = card;
470
471 if (register_hdlc_device(dev)) {
472 printk(KERN_WARNING "n2: unable to register hdlc "
473 "device\n");
474 port->card = NULL;
475 n2_destroy_card(card);
476 return -ENOBUFS;
477 }
478 sca_init_sync_port(port); /* Set up SCA memory */
479
480 printk(KERN_INFO "%s: RISCom/N2 node %d\n",
481 dev->name, port->phy_node);
482 }
483
484 *new_card = card;
485 new_card = &card->next_card;
486
487 return 0;
488}
489
490
491
492static int __init n2_init(void)
493{
494 if (hw==NULL) {
495#ifdef MODULE
496 printk(KERN_INFO "n2: no card initialized\n");
497#endif
498 return -ENOSYS; /* no parameters specified, abort */
499 }
500
501 printk(KERN_INFO "%s\n", version);
502
503 do {
504 unsigned long io, irq, ram;
505 long valid[2] = { 0, 0 }; /* Default = both ports disabled */
506
507 io = simple_strtoul(hw, &hw, 0);
508
509 if (*hw++ != ',')
510 break;
511 irq = simple_strtoul(hw, &hw, 0);
512
513 if (*hw++ != ',')
514 break;
515 ram = simple_strtoul(hw, &hw, 0);
516
517 if (*hw++ != ',')
518 break;
519 while(1) {
520 if (*hw == '0' && !valid[0])
521 valid[0] = 1; /* Port 0 enabled */
522 else if (*hw == '1' && !valid[1])
523 valid[1] = 1; /* Port 1 enabled */
524 else
525 break;
526 hw++;
527 }
528
529 if (!valid[0] && !valid[1])
530 break; /* at least one port must be used */
531
532 if (*hw == ':' || *hw == '\x0')
533 n2_run(io, irq, ram, valid[0], valid[1]);
534
535 if (*hw == '\x0')
536 return first_card ? 0 : -ENOSYS;
537 }while(*hw++ == ':');
538
539 printk(KERN_ERR "n2: invalid hardware parameters\n");
540 return first_card ? 0 : -ENOSYS;
541}
542
543
544static void __exit n2_cleanup(void)
545{
546 card_t *card = first_card;
547
548 while (card) {
549 card_t *ptr = card;
550 card = card->next_card;
551 n2_destroy_card(ptr);
552 }
553}
554
555
556module_init(n2_init);
557module_exit(n2_cleanup);
558
559MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
560MODULE_DESCRIPTION("RISCom/N2 serial port driver");
561MODULE_LICENSE("GPL v2");
562module_param(hw, charp, 0444); /* hw=io,irq,ram,ports:io,irq,... */
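/*
 * Example (hypothetical setup): one card at I/O 0x300, IRQ 10, RAM window
 * at 0xD0000, with both ports enabled, would be loaded as
 *	modprobe n2 hw=0x300,10,0xD0000,01
 * Further cards are appended after a ':' separator, e.g.
 *	hw=0x300,10,0xD0000,01:0x310,11,0xD4000,0
 */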
diff --git a/drivers/net/wan/pc300-falc-lh.h b/drivers/net/wan/pc300-falc-lh.h
new file mode 100644
index 000000000000..01ed23ca76c7
--- /dev/null
+++ b/drivers/net/wan/pc300-falc-lh.h
@@ -0,0 +1,1238 @@
1/*
2 * falc.h Description of the Siemens FALC T1/E1 framer.
3 *
4 * Author: Ivan Passos <ivan@cyclades.com>
5 *
6 * Copyright: (c) 2000-2001 Cyclades Corp.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * $Log: falc-lh.h,v $
14 * Revision 3.1 2001/06/15 12:41:10 regina
15 * upping major version number
16 *
17 * Revision 1.1.1.1 2001/06/13 20:24:47 daniela
18 * PC300 initial CVS version (3.4.0-pre1)
19 *
20 * Revision 1.1 2000/05/15 ivan
21 * Included DJA bits for the LIM2 register.
22 *
23 * Revision 1.0 2000/02/22 ivan
24 * Initial version.
25 *
26 */
27
28#ifndef _FALC_LH_H
29#define _FALC_LH_H
30
31#define NUM_OF_T1_CHANNELS 24
32#define NUM_OF_E1_CHANNELS 32
33
34/*>>>>>>>>>>>>>>>>> FALC Register Bits (Transmit Mode) <<<<<<<<<<<<<<<<<<< */
35
36/* CMDR (Command Register)
37 ---------------- E1 & T1 ------------------------------ */
38#define CMDR_RMC 0x80
39#define CMDR_RRES 0x40
40#define CMDR_XREP 0x20
41#define CMDR_XRES 0x10
42#define CMDR_XHF 0x08
43#define CMDR_XTF 0x04
44#define CMDR_XME 0x02
45#define CMDR_SRES 0x01
46
47/* MODE (Mode Register)
48 ----------------- E1 & T1 ----------------------------- */
49#define MODE_MDS2 0x80
50#define MODE_MDS1 0x40
51#define MODE_MDS0 0x20
52#define MODE_BRAC 0x10
53#define MODE_HRAC 0x08
54
55/* IPC (Interrupt Port Configuration)
56 ----------------- E1 & T1 ----------------------------- */
57#define IPC_VIS 0x80
58#define IPC_SCI 0x04
59#define IPC_IC1 0x02
60#define IPC_IC0 0x01
61
62/* CCR1 (Common Configuration Register 1)
63 ----------------- E1 & T1 ----------------------------- */
64#define CCR1_SFLG 0x80
65#define CCR1_XTS16RA 0x40
66#define CCR1_BRM 0x40
67#define CCR1_CASSYM 0x20
68#define CCR1_EDLX 0x20
69#define CCR1_EITS 0x10
70#define CCR1_ITF 0x08
71#define CCR1_RFT1 0x02
72#define CCR1_RFT0 0x01
73
74/* CCR3 (Common Configuration Register 3)
75 ---------------- E1 & T1 ------------------------------ */
76
77#define CCR3_PRE1 0x80
78#define CCR3_PRE0 0x40
79#define CCR3_EPT 0x20
80#define CCR3_RADD 0x10
81#define CCR3_RCRC 0x04
82#define CCR3_XCRC 0x02
83
84
85/* RTR1-4 (Receive Timeslot Register 1-4)
86 ---------------- E1 & T1 ------------------------------ */
87
88#define RTR1_TS0 0x80
89#define RTR1_TS1 0x40
90#define RTR1_TS2 0x20
91#define RTR1_TS3 0x10
92#define RTR1_TS4 0x08
93#define RTR1_TS5 0x04
94#define RTR1_TS6 0x02
95#define RTR1_TS7 0x01
96
97#define RTR2_TS8 0x80
98#define RTR2_TS9 0x40
99#define RTR2_TS10 0x20
100#define RTR2_TS11 0x10
101#define RTR2_TS12 0x08
102#define RTR2_TS13 0x04
103#define RTR2_TS14 0x02
104#define RTR2_TS15 0x01
105
106#define RTR3_TS16 0x80
107#define RTR3_TS17 0x40
108#define RTR3_TS18 0x20
109#define RTR3_TS19 0x10
110#define RTR3_TS20 0x08
111#define RTR3_TS21 0x04
112#define RTR3_TS22 0x02
113#define RTR3_TS23 0x01
114
115#define RTR4_TS24 0x80
116#define RTR4_TS25 0x40
117#define RTR4_TS26 0x20
118#define RTR4_TS27 0x10
119#define RTR4_TS28 0x08
120#define RTR4_TS29 0x04
121#define RTR4_TS30 0x02
122#define RTR4_TS31 0x01
123
124
125/* TTR1-4 (Transmit Timeslot Register 1-4)
126 ---------------- E1 & T1 ------------------------------ */
127
128#define TTR1_TS0 0x80
129#define TTR1_TS1 0x40
130#define TTR1_TS2 0x20
131#define TTR1_TS3 0x10
132#define TTR1_TS4 0x08
133#define TTR1_TS5 0x04
134#define TTR1_TS6 0x02
135#define TTR1_TS7 0x01
136
137#define TTR2_TS8 0x80
138#define TTR2_TS9 0x40
139#define TTR2_TS10 0x20
140#define TTR2_TS11 0x10
141#define TTR2_TS12 0x08
142#define TTR2_TS13 0x04
143#define TTR2_TS14 0x02
144#define TTR2_TS15 0x01
145
146#define TTR3_TS16 0x80
147#define TTR3_TS17 0x40
148#define TTR3_TS18 0x20
149#define TTR3_TS19 0x10
150#define TTR3_TS20 0x08
151#define TTR3_TS21 0x04
152#define TTR3_TS22 0x02
153#define TTR3_TS23 0x01
154
155#define TTR4_TS24 0x80
156#define TTR4_TS25 0x40
157#define TTR4_TS26 0x20
158#define TTR4_TS27 0x10
159#define TTR4_TS28 0x08
160#define TTR4_TS29 0x04
161#define TTR4_TS30 0x02
162#define TTR4_TS31 0x01
163
164
165
166/* IMR0-4 (Interrupt Mask Register 0-4)
167
168 ----------------- E1 & T1 ----------------------------- */
169
170#define IMR0_RME 0x80
171#define IMR0_RFS 0x40
172#define IMR0_T8MS 0x20
173#define IMR0_ISF 0x20
174#define IMR0_RMB 0x10
175#define IMR0_CASC 0x08
176#define IMR0_RSC 0x08
177#define IMR0_CRC6 0x04
178#define IMR0_CRC4 0x04
179#define IMR0_PDEN 0x02
180#define IMR0_RPF 0x01
181
182#define IMR1_CASE 0x80
183#define IMR1_RDO 0x40
184#define IMR1_ALLS 0x20
185#define IMR1_XDU 0x10
186#define IMR1_XMB 0x08
187#define IMR1_XLSC 0x02
188#define IMR1_XPR 0x01
189#define IMR1_LLBSC 0x80
190
191#define IMR2_FAR 0x80
192#define IMR2_LFA 0x40
193#define IMR2_MFAR 0x20
194#define IMR2_T400MS 0x10
195#define IMR2_LMFA 0x10
196#define IMR2_AIS 0x08
197#define IMR2_LOS 0x04
198#define IMR2_RAR 0x02
199#define IMR2_RA 0x01
200
201#define IMR3_ES 0x80
202#define IMR3_SEC 0x40
203#define IMR3_LMFA16 0x20
204#define IMR3_AIS16 0x10
205#define IMR3_RA16 0x08
206#define IMR3_API 0x04
207#define IMR3_XSLP 0x20
208#define IMR3_XSLN 0x10
209#define IMR3_LLBSC 0x08
210#define IMR3_XRS 0x04
211#define IMR3_SLN 0x02
212#define IMR3_SLP 0x01
213
214#define IMR4_LFA 0x80
215#define IMR4_FER 0x40
216#define IMR4_CER 0x20
217#define IMR4_AIS 0x10
218#define IMR4_LOS 0x08
219#define IMR4_CVE 0x04
220#define IMR4_SLIP 0x02
221#define IMR4_EBE 0x01
222
223/* FMR0-5 for E1 and T1 (Framer Mode Register ) */
224
225#define FMR0_XC1 0x80
226#define FMR0_XC0 0x40
227#define FMR0_RC1 0x20
228#define FMR0_RC0 0x10
229#define FMR0_EXTD 0x08
230#define FMR0_ALM 0x04
231#define E1_FMR0_FRS 0x02
232#define T1_FMR0_FRS 0x08
233#define FMR0_SRAF 0x04
234#define FMR0_EXLS 0x02
235#define FMR0_SIM 0x01
236
237#define FMR1_MFCS 0x80
238#define FMR1_AFR 0x40
239#define FMR1_ENSA 0x20
240#define FMR1_CTM 0x80
241#define FMR1_SIGM 0x40
242#define FMR1_EDL 0x20
243#define FMR1_PMOD 0x10
244#define FMR1_XFS 0x08
245#define FMR1_CRC 0x08
246#define FMR1_ECM 0x04
247#define FMR1_IMOD 0x02
248#define FMR1_XAIS 0x01
249
250#define FMR2_RFS1 0x80
251#define FMR2_RFS0 0x40
252#define FMR2_MCSP 0x40
253#define FMR2_RTM 0x20
254#define FMR2_SSP 0x20
255#define FMR2_DAIS 0x10
256#define FMR2_SAIS 0x08
257#define FMR2_PLB 0x04
258#define FMR2_AXRA 0x02
259#define FMR2_ALMF 0x01
260#define FMR2_EXZE 0x01
261
262#define LOOP_RTM 0x40
263#define LOOP_SFM 0x40
264#define LOOP_ECLB 0x20
265#define LOOP_CLA 0x1f
266
267/*--------------------- E1 ----------------------------*/
268#define FMR3_XLD 0x20
269#define FMR3_XLU 0x10
270
271/*--------------------- T1 ----------------------------*/
272#define FMR4_AIS3 0x80
273#define FMR4_TM 0x40
274#define FMR4_XRA 0x20
275#define FMR4_SSC1 0x10
276#define FMR4_SSC0 0x08
277#define FMR4_AUTO 0x04
278#define FMR4_FM1 0x02
279#define FMR4_FM0 0x01
280
281#define FMR5_SRS 0x80
282#define FMR5_EIBR 0x40
283#define FMR5_XLD 0x20
284#define FMR5_XLU 0x10
285
286
287/* LOOP (Channel Loop Back)
288
289 ------------------ E1 & T1 ---------------------------- */
290
291#define LOOP_SFM 0x40
292#define LOOP_ECLB 0x20
293#define LOOP_CLA4 0x10
294#define LOOP_CLA3 0x08
295#define LOOP_CLA2 0x04
296#define LOOP_CLA1 0x02
297#define LOOP_CLA0 0x01
298
299
300
301/* XSW (Transmit Service Word Pulseframe)
302
303 ------------------- E1 --------------------------- */
304
305#define XSW_XSIS 0x80
306#define XSW_XTM 0x40
307#define XSW_XRA 0x20
308#define XSW_XY0 0x10
309#define XSW_XY1 0x08
310#define XSW_XY2 0x04
311#define XSW_XY3 0x02
312#define XSW_XY4 0x01
313
314
315/* XSP (Transmit Spare Bits)
316
317 ------------------- E1 --------------------------- */
318
319#define XSP_XAP 0x80
320#define XSP_CASEN 0x40
321#define XSP_TT0 0x20
322#define XSP_EBP 0x10
323#define XSP_AXS 0x08
324#define XSP_XSIF 0x04
325#define XSP_XS13 0x02
326#define XSP_XS15 0x01
327
328
329/* XC0/1 (Transmit Control 0/1)
330 ------------------ E1 & T1 ---------------------------- */
331
332#define XC0_SA8E 0x80
333#define XC0_SA7E 0x40
334#define XC0_SA6E 0x20
335#define XC0_SA5E 0x10
336#define XC0_SA4E 0x08
337#define XC0_BRM 0x80
338#define XC0_MFBS 0x40
339#define XC0_SFRZ 0x10
340#define XC0_XCO2 0x04
341#define XC0_XCO1 0x02
342#define XC0_XCO0 0x01
343
344#define XC1_XTO5 0x20
345#define XC1_XTO4 0x10
346#define XC1_XTO3 0x08
347#define XC1_XTO2 0x04
348#define XC1_XTO1 0x02
349#define XC1_XTO0 0x01
350
351
352/* RC0/1 (Receive Control 0/1)
353 ------------------ E1 & T1 ---------------------------- */
354
355#define RC0_SICS 0x40
356#define RC0_CRCI 0x20
357#define RC0_XCRCI 0x10
358#define RC0_RDIS 0x08
359#define RC0_RCO2 0x04
360#define RC0_RCO1 0x02
361#define RC0_RCO0 0x01
362
363#define RC1_SWD 0x80
364#define RC1_ASY4 0x40
365#define RC1_RRAM 0x40
366#define RC1_RTO5 0x20
367#define RC1_RTO4 0x10
368#define RC1_RTO3 0x08
369#define RC1_RTO2 0x04
370#define RC1_RTO1 0x02
371#define RC1_RTO0 0x01
372
373
374
375/* XPM0-2 (Transmit Pulse Mask 0-2)
376 --------------------- E1 & T1 ------------------------- */
377
378#define XPM0_XP12 0x80
379#define XPM0_XP11 0x40
380#define XPM0_XP10 0x20
381#define XPM0_XP04 0x10
382#define XPM0_XP03 0x08
383#define XPM0_XP02 0x04
384#define XPM0_XP01 0x02
385#define XPM0_XP00 0x01
386
387#define XPM1_XP30 0x80
388#define XPM1_XP24 0x40
389#define XPM1_XP23 0x20
390#define XPM1_XP22 0x10
391#define XPM1_XP21 0x08
392#define XPM1_XP20 0x04
393#define XPM1_XP14 0x02
394#define XPM1_XP13 0x01
395
396#define XPM2_XLHP 0x80
397#define XPM2_XLT 0x40
398#define XPM2_DAXLT 0x20
399#define XPM2_XP34 0x08
400#define XPM2_XP33 0x04
401#define XPM2_XP32 0x02
402#define XPM2_XP31 0x01
403
404
405/* TSWM (Transparent Service Word Mask)
406 ------------------ E1 ---------------------------- */
407
408#define TSWM_TSIS 0x80
409#define TSWM_TSIF 0x40
410#define TSWM_TRA 0x20
411#define TSWM_TSA4 0x10
412#define TSWM_TSA5 0x08
413#define TSWM_TSA6 0x04
414#define TSWM_TSA7 0x02
415#define TSWM_TSA8 0x01
416
417/* IDLE <Idle Channel Code Register>
418
419 ------------------ E1 & T1 ----------------------- */
420
421#define IDLE_IDL7 0x80
422#define IDLE_IDL6 0x40
423#define IDLE_IDL5 0x20
424#define IDLE_IDL4 0x10
425#define IDLE_IDL3 0x08
426#define IDLE_IDL2 0x04
427#define IDLE_IDL1 0x02
428#define IDLE_IDL0 0x01
429
430
431/* XSA4-8 <Transmit SA4-8 Register(Read/Write) >
432 -------------------E1 ----------------------------- */
433
434#define XSA4_XS47 0x80
435#define XSA4_XS46 0x40
436#define XSA4_XS45 0x20
437#define XSA4_XS44 0x10
438#define XSA4_XS43 0x08
439#define XSA4_XS42 0x04
440#define XSA4_XS41 0x02
441#define XSA4_XS40 0x01
442
443#define XSA5_XS57 0x80
444#define XSA5_XS56 0x40
445#define XSA5_XS55 0x20
446#define XSA5_XS54 0x10
447#define XSA5_XS53 0x08
448#define XSA5_XS52 0x04
449#define XSA5_XS51 0x02
450#define XSA5_XS50 0x01
451
452#define XSA6_XS67 0x80
453#define XSA6_XS66 0x40
454#define XSA6_XS65 0x20
455#define XSA6_XS64 0x10
456#define XSA6_XS63 0x08
457#define XSA6_XS62 0x04
458#define XSA6_XS61 0x02
459#define XSA6_XS60 0x01
460
461#define XSA7_XS77 0x80
462#define XSA7_XS76 0x40
463#define XSA7_XS75 0x20
464#define XSA7_XS74 0x10
465#define XSA7_XS73 0x08
466#define XSA7_XS72 0x04
467#define XSA7_XS71 0x02
468#define XSA7_XS70 0x01
469
470#define XSA8_XS87 0x80
471#define XSA8_XS86 0x40
472#define XSA8_XS85 0x20
473#define XSA8_XS84 0x10
474#define XSA8_XS83 0x08
475#define XSA8_XS82 0x04
476#define XSA8_XS81 0x02
477#define XSA8_XS80 0x01
478
479
480/* XDL1-3 (Transmit DL-Bit Register1-3 (read/write))
481 ----------------------- T1 --------------------- */
482
483#define XDL1_XDL17 0x80
484#define XDL1_XDL16 0x40
485#define XDL1_XDL15 0x20
486#define XDL1_XDL14 0x10
487#define XDL1_XDL13 0x08
488#define XDL1_XDL12 0x04
489#define XDL1_XDL11 0x02
490#define XDL1_XDL10 0x01
491
492#define XDL2_XDL27 0x80
493#define XDL2_XDL26 0x40
494#define XDL2_XDL25 0x20
495#define XDL2_XDL24 0x10
496#define XDL2_XDL23 0x08
497#define XDL2_XDL22 0x04
498#define XDL2_XDL21 0x02
499#define XDL2_XDL20 0x01
500
501#define XDL3_XDL37 0x80
502#define XDL3_XDL36 0x40
503#define XDL3_XDL35 0x20
504#define XDL3_XDL34 0x10
505#define XDL3_XDL33 0x08
506#define XDL3_XDL32 0x04
507#define XDL3_XDL31 0x02
508#define XDL3_XDL30 0x01
509
510
511/* ICB1-4 (Idle Channel Register 1-4)
512 ------------------ E1 ---------------------------- */
513
514#define E1_ICB1_IC0 0x80
515#define E1_ICB1_IC1 0x40
516#define E1_ICB1_IC2 0x20
517#define E1_ICB1_IC3 0x10
518#define E1_ICB1_IC4 0x08
519#define E1_ICB1_IC5 0x04
520#define E1_ICB1_IC6 0x02
521#define E1_ICB1_IC7 0x01
522
523#define E1_ICB2_IC8 0x80
524#define E1_ICB2_IC9 0x40
525#define E1_ICB2_IC10 0x20
526#define E1_ICB2_IC11 0x10
527#define E1_ICB2_IC12 0x08
528#define E1_ICB2_IC13 0x04
529#define E1_ICB2_IC14 0x02
530#define E1_ICB2_IC15 0x01
531
532#define E1_ICB3_IC16 0x80
533#define E1_ICB3_IC17 0x40
534#define E1_ICB3_IC18 0x20
535#define E1_ICB3_IC19 0x10
536#define E1_ICB3_IC20 0x08
537#define E1_ICB3_IC21 0x04
538#define E1_ICB3_IC22 0x02
539#define E1_ICB3_IC23 0x01
540
541#define E1_ICB4_IC24 0x80
542#define E1_ICB4_IC25 0x40
543#define E1_ICB4_IC26 0x20
544#define E1_ICB4_IC27 0x10
545#define E1_ICB4_IC28 0x08
546#define E1_ICB4_IC29 0x04
547#define E1_ICB4_IC30 0x02
548#define E1_ICB4_IC31 0x01
549
550/* ICB1-4 (Idle Channel Register 1-4)
551 ------------------ T1 ---------------------------- */
552
553#define T1_ICB1_IC1 0x80
554#define T1_ICB1_IC2 0x40
555#define T1_ICB1_IC3 0x20
556#define T1_ICB1_IC4 0x10
557#define T1_ICB1_IC5 0x08
558#define T1_ICB1_IC6 0x04
559#define T1_ICB1_IC7 0x02
560#define T1_ICB1_IC8 0x01
561
562#define T1_ICB2_IC9 0x80
563#define T1_ICB2_IC10 0x40
564#define T1_ICB2_IC11 0x20
565#define T1_ICB2_IC12 0x10
566#define T1_ICB2_IC13 0x08
567#define T1_ICB2_IC14 0x04
568#define T1_ICB2_IC15 0x02
569#define T1_ICB2_IC16 0x01
570
571#define T1_ICB3_IC17 0x80
572#define T1_ICB3_IC18 0x40
573#define T1_ICB3_IC19 0x20
574#define T1_ICB3_IC20 0x10
575#define T1_ICB3_IC21 0x08
576#define T1_ICB3_IC22 0x04
577#define T1_ICB3_IC23 0x02
578#define T1_ICB3_IC24 0x01
579
580/* FMR3 (Framer Mode Register 3)
581 --------------------E1------------------------ */
582
583#define FMR3_CMI 0x08
584#define FMR3_SYNSA 0x04
585#define FMR3_CFRZ 0x02
586#define FMR3_EXTIW 0x01
587
588
589
590/* CCB1-3 (Clear Channel Register)
591 ------------------- T1 ----------------------- */
592
593#define CCB1_CH1 0x80
594#define CCB1_CH2 0x40
595#define CCB1_CH3 0x20
596#define CCB1_CH4 0x10
597#define CCB1_CH5 0x08
598#define CCB1_CH6 0x04
599#define CCB1_CH7 0x02
600#define CCB1_CH8 0x01
601
602#define CCB2_CH9 0x80
603#define CCB2_CH10 0x40
604#define CCB2_CH11 0x20
605#define CCB2_CH12 0x10
606#define CCB2_CH13 0x08
607#define CCB2_CH14 0x04
608#define CCB2_CH15 0x02
609#define CCB2_CH16 0x01
610
611#define CCB3_CH17 0x80
612#define CCB3_CH18 0x40
613#define CCB3_CH19 0x20
614#define CCB3_CH20 0x10
615#define CCB3_CH21 0x08
616#define CCB3_CH22 0x04
617#define CCB3_CH23 0x02
618#define CCB3_CH24 0x01
619
620
621/* LIM0/1 (Line Interface Mode 0/1)
622 ------------------- E1 & T1 --------------------------- */
623
624#define LIM0_XFB 0x80
625#define LIM0_XDOS 0x40
626#define LIM0_SCL1 0x20
627#define LIM0_SCL0 0x10
628#define LIM0_EQON 0x08
629#define LIM0_ELOS 0x04
630#define LIM0_LL 0x02
631#define LIM0_MAS 0x01
632
633#define LIM1_EFSC 0x80
634#define LIM1_RIL2 0x40
635#define LIM1_RIL1 0x20
636#define LIM1_RIL0 0x10
637#define LIM1_DCOC 0x08
638#define LIM1_JATT 0x04
639#define LIM1_RL 0x02
640#define LIM1_DRS 0x01
641
642
643/* PCDR (Pulse Count Detection Register(Read/Write))
644 ------------------ E1 & T1 ------------------------- */
645
646#define PCDR_PCD7 0x80
647#define PCDR_PCD6 0x40
648#define PCDR_PCD5 0x20
649#define PCDR_PCD4 0x10
650#define PCDR_PCD3 0x08
651#define PCDR_PCD2 0x04
652#define PCDR_PCD1 0x02
653#define PCDR_PCD0 0x01
654
655#define PCRR_PCR7 0x80
656#define PCRR_PCR6 0x40
657#define PCRR_PCR5 0x20
658#define PCRR_PCR4 0x10
659#define PCRR_PCR3 0x08
660#define PCRR_PCR2 0x04
661#define PCRR_PCR1 0x02
662#define PCRR_PCR0 0x01
663
664
665/* LIM2 (Line Interface Mode 2)
666
667 ------------------ E1 & T1 ---------------------------- */
668
669#define LIM2_DJA2 0x20
670#define LIM2_DJA1 0x10
671#define LIM2_LOS2 0x02
672#define LIM2_LOS1 0x01
673
674/* LCR1 (Loop Code Register 1) */
675
676#define LCR1_EPRM 0x80
677#define LCR1_XPRBS 0x40
678
679/* SIC1 (System Interface Control 1) */
680#define SIC1_SRSC 0x80
681#define SIC1_RBS1 0x20
682#define SIC1_RBS0 0x10
683#define SIC1_SXSC 0x08
684#define SIC1_XBS1 0x02
685#define SIC1_XBS0 0x01
686
687/* DEC (Disable Error Counter)
688 ------------------ E1 & T1 ---------------------------- */
689
690#define DEC_DCEC3 0x20
691#define DEC_DBEC 0x10
692#define DEC_DCEC1 0x08
693#define DEC_DCEC 0x08
694#define DEC_DEBC 0x04
695#define DEC_DCVC 0x02
696#define DEC_DFEC 0x01
697
698
699/* FALC Register Bits (Receive Mode)
700 ---------------------------------------------------------------------------- */
701
702
703/* FRS0/1 (Framer Receive Status Register 0/1)
704 ----------------- E1 & T1 ---------------------------------- */
705
706#define FRS0_LOS 0x80
707#define FRS0_AIS 0x40
708#define FRS0_LFA 0x20
709#define FRS0_RRA 0x10
710#define FRS0_API 0x08
711#define FRS0_NMF 0x04
712#define FRS0_LMFA 0x02
713#define FRS0_FSRF 0x01
714
715#define FRS1_TS16RA 0x40
716#define FRS1_TS16LOS 0x20
717#define FRS1_TS16AIS 0x10
718#define FRS1_TS16LFA 0x08
719#define FRS1_EXZD 0x80
720#define FRS1_LLBDD 0x10
721#define FRS1_LLBAD 0x08
722#define FRS1_XLS 0x02
723#define FRS1_XLO 0x01
724#define FRS1_PDEN 0x40
725
726/* FRS2/3 (Framer Receive Status Register 2/3)
727 ----------------- T1 ---------------------------------- */
728
729#define FRS2_ESC2 0x80
730#define FRS2_ESC1 0x40
731#define FRS2_ESC0 0x20
732
733#define FRS3_FEH5 0x20
734#define FRS3_FEH4 0x10
735#define FRS3_FEH3 0x08
736#define FRS3_FEH2 0x04
737#define FRS3_FEH1 0x02
738#define FRS3_FEH0 0x01
739
740
741/* RSW (Receive Service Word Pulseframe)
742 ----------------- E1 ------------------------------ */
743
744#define RSW_RSI 0x80
745#define RSW_RRA 0x20
746#define RSW_RYO 0x10
747#define RSW_RY1 0x08
748#define RSW_RY2 0x04
749#define RSW_RY3 0x02
750#define RSW_RY4 0x01
751
752
753/* RSP (Receive Spare Bits / Additional Status)
754 ---------------- E1 ------------------------------- */
755
756#define RSP_SI1 0x80
757#define RSP_SI2 0x40
758#define RSP_LLBDD 0x10
759#define RSP_LLBAD 0x08
760#define RSP_RSIF 0x04
761#define RSP_RS13 0x02
762#define RSP_RS15 0x01
763
764
765/* FECL (Framing Error Counter)
766 ---------------- E1 & T1 -------------------------- */
767
768#define FECL_FE7 0x80
769#define FECL_FE6 0x40
770#define FECL_FE5 0x20
771#define FECL_FE4 0x10
772#define FECL_FE3 0x08
773#define FECL_FE2 0x04
774#define FECL_FE1 0x02
775#define FECL_FE0 0x01
776
777#define FECH_FE15 0x80
778#define FECH_FE14 0x40
779#define FECH_FE13 0x20
780#define FECH_FE12 0x10
781#define FECH_FE11 0x08
782#define FECH_FE10 0x04
783#define FECH_FE9 0x02
784#define FECH_FE8 0x01
785
786
787/* CVCl (Code Violation Counter)
788 ----------------- E1 ------------------------- */
789
790#define CVCL_CV7 0x80
791#define CVCL_CV6 0x40
792#define CVCL_CV5 0x20
793#define CVCL_CV4 0x10
794#define CVCL_CV3 0x08
795#define CVCL_CV2 0x04
796#define CVCL_CV1 0x02
797#define CVCL_CV0 0x01
798
799#define CVCH_CV15 0x80
800#define CVCH_CV14 0x40
801#define CVCH_CV13 0x20
802#define CVCH_CV12 0x10
803#define CVCH_CV11 0x08
804#define CVCH_CV10 0x04
805#define CVCH_CV9 0x02
806#define CVCH_CV8 0x01
807
808
809/* CEC1-3L (CRC Error Counter)
810 ------------------ E1 ----------------------------- */
811
812#define CEC1L_CR7 0x80
813#define CEC1L_CR6 0x40
814#define CEC1L_CR5 0x20
815#define CEC1L_CR4 0x10
816#define CEC1L_CR3 0x08
817#define CEC1L_CR2 0x04
818#define CEC1L_CR1 0x02
819#define CEC1L_CR0 0x01
820
821#define CEC1H_CR15 0x80
822#define CEC1H_CR14 0x40
823#define CEC1H_CR13 0x20
824#define CEC1H_CR12 0x10
825#define CEC1H_CR11 0x08
826#define CEC1H_CR10 0x04
827#define CEC1H_CR9 0x02
828#define CEC1H_CR8 0x01
829
830#define CEC2L_CR7 0x80
831#define CEC2L_CR6 0x40
832#define CEC2L_CR5 0x20
833#define CEC2L_CR4 0x10
834#define CEC2L_CR3 0x08
835#define CEC2L_CR2 0x04
836#define CEC2L_CR1 0x02
837#define CEC2L_CR0 0x01
838
839#define CEC2H_CR15 0x80
840#define CEC2H_CR14 0x40
841#define CEC2H_CR13 0x20
842#define CEC2H_CR12 0x10
843#define CEC2H_CR11 0x08
844#define CEC2H_CR10 0x04
845#define CEC2H_CR9 0x02
846#define CEC2H_CR8 0x01
847
848#define CEC3L_CR7 0x80
849#define CEC3L_CR6 0x40
850#define CEC3L_CR5 0x20
851#define CEC3L_CR4 0x10
852#define CEC3L_CR3 0x08
853#define CEC3L_CR2 0x04
854#define CEC3L_CR1 0x02
855#define CEC3L_CR0 0x01
856
857#define CEC3H_CR15 0x80
858#define CEC3H_CR14 0x40
859#define CEC3H_CR13 0x20
860#define CEC3H_CR12 0x10
861#define CEC3H_CR11 0x08
862#define CEC3H_CR10 0x04
863#define CEC3H_CR9 0x02
864#define CEC3H_CR8 0x01
865
866
867/* CECL (CRC Error Counter)
868
869 ------------------ T1 ----------------------------- */
870
871#define CECL_CR7 0x80
872#define CECL_CR6 0x40
873#define CECL_CR5 0x20
874#define CECL_CR4 0x10
875#define CECL_CR3 0x08
876#define CECL_CR2 0x04
877#define CECL_CR1 0x02
878#define CECL_CR0 0x01
879
880#define CECH_CR15 0x80
881#define CECH_CR14 0x40
882#define CECH_CR13 0x20
883#define CECH_CR12 0x10
884#define CECH_CR11 0x08
885#define CECH_CR10 0x04
886#define CECH_CR9 0x02
887#define CECH_CR8 0x01
888
889/* EBCL (E Bit Error Counter)
890 ------------------- E1 & T1 ------------------------- */
891
892#define EBCL_EB7 0x80
893#define EBCL_EB6 0x40
894#define EBCL_EB5 0x20
895#define EBCL_EB4 0x10
896#define EBCL_EB3 0x08
897#define EBCL_EB2 0x04
898#define EBCL_EB1 0x02
899#define EBCL_EB0 0x01
900
901#define EBCH_EB15 0x80
902#define EBCH_EB14 0x40
903#define EBCH_EB13 0x20
904#define EBCH_EB12 0x10
905#define EBCH_EB11 0x08
906#define EBCH_EB10 0x04
907#define EBCH_EB9 0x02
908#define EBCH_EB8 0x01
909
910
911/* RSA4-8 (Receive Sa4-8-Bit Register)
912 -------------------- E1 --------------------------- */
913
914#define RSA4_RS47 0x80
915#define RSA4_RS46 0x40
916#define RSA4_RS45 0x20
917#define RSA4_RS44 0x10
918#define RSA4_RS43 0x08
919#define RSA4_RS42 0x04
920#define RSA4_RS41 0x02
921#define RSA4_RS40 0x01
922
923#define RSA5_RS57 0x80
924#define RSA5_RS56 0x40
925#define RSA5_RS55 0x20
926#define RSA5_RS54 0x10
927#define RSA5_RS53 0x08
928#define RSA5_RS52 0x04
929#define RSA5_RS51 0x02
930#define RSA5_RS50 0x01
931
932#define RSA6_RS67 0x80
933#define RSA6_RS66 0x40
934#define RSA6_RS65 0x20
935#define RSA6_RS64 0x10
936#define RSA6_RS63 0x08
937#define RSA6_RS62 0x04
938#define RSA6_RS61 0x02
939#define RSA6_RS60 0x01
940
941#define RSA7_RS77 0x80
942#define RSA7_RS76 0x40
943#define RSA7_RS75 0x20
944#define RSA7_RS74 0x10
945#define RSA7_RS73 0x08
946#define RSA7_RS72 0x04
947#define RSA7_RS71 0x02
948#define RSA7_RS70 0x01
949
950#define RSA8_RS87 0x80
951#define RSA8_RS86 0x40
952#define RSA8_RS85 0x20
953#define RSA8_RS84 0x10
954#define RSA8_RS83 0x08
955#define RSA8_RS82 0x04
956#define RSA8_RS81 0x02
957#define RSA8_RS80 0x01
958
959/* RSA6S (Receive Sa6 Bit Status Register)
960 ------------------------ T1 ------------------------- */
961
962#define RSA6S_SX 0x20
963#define RSA6S_SF 0x10
964#define RSA6S_SE 0x08
965#define RSA6S_SC 0x04
966#define RSA6S_SA 0x02
967#define RSA6S_S8 0x01
968
969
970/* RDL1-3 (Receive DL-Bit Register 1-3)
971 ------------------------ T1 ------------------------- */
972
973#define RDL1_RDL17 0x80
974#define RDL1_RDL16 0x40
975#define RDL1_RDL15 0x20
976#define RDL1_RDL14 0x10
977#define RDL1_RDL13 0x08
978#define RDL1_RDL12 0x04
979#define RDL1_RDL11 0x02
980#define RDL1_RDL10 0x01
981
982#define RDL2_RDL27 0x80
983#define RDL2_RDL26 0x40
984#define RDL2_RDL25 0x20
985#define RDL2_RDL24 0x10
986#define RDL2_RDL23 0x08
987#define RDL2_RDL22 0x04
988#define RDL2_RDL21 0x02
989#define RDL2_RDL20 0x01
990
991#define RDL3_RDL37 0x80
992#define RDL3_RDL36 0x40
993#define RDL3_RDL35 0x20
994#define RDL3_RDL34 0x10
995#define RDL3_RDL33 0x08
996#define RDL3_RDL32 0x04
997#define RDL3_RDL31 0x02
998#define RDL3_RDL30 0x01
999
1000
1001/* SIS (Signaling Status Register)
1002
1003 -------------------- E1 & T1 -------------------------- */
1004
1005#define SIS_XDOV 0x80
1006#define SIS_XFW 0x40
1007#define SIS_XREP 0x20
1008#define SIS_RLI 0x08
1009#define SIS_CEC 0x04
1010#define SIS_BOM 0x01
1011
1012
1013/* RSIS (Receive Signaling Status Register)
1014
1015 -------------------- E1 & T1 --------------------------- */
1016
1017#define RSIS_VFR 0x80
1018#define RSIS_RDO 0x40
1019#define RSIS_CRC16 0x20
1020#define RSIS_RAB 0x10
1021#define RSIS_HA1 0x08
1022#define RSIS_HA0 0x04
1023#define RSIS_HFR 0x02
1024#define RSIS_LA 0x01
1025
1026
1027/* RBCL/H (Receive Byte Count Low/High)
1028
1029 ------------------- E1 & T1 ----------------------- */
1030
1031#define RBCL_RBC7 0x80
1032#define RBCL_RBC6 0x40
1033#define RBCL_RBC5 0x20
1034#define RBCL_RBC4 0x10
1035#define RBCL_RBC3 0x08
1036#define RBCL_RBC2 0x04
1037#define RBCL_RBC1 0x02
1038#define RBCL_RBC0 0x01
1039
1040#define RBCH_OV 0x10
1041#define RBCH_RBC11 0x08
1042#define RBCH_RBC10 0x04
1043#define RBCH_RBC9 0x02
1044#define RBCH_RBC8 0x01
1045
1046
1047/* ISR1-3 (Interrupt Status Register 1-3)
1048
1049 ------------------ E1 & T1 ------------------------------ */
1050
1051#define FISR0_RME 0x80
1052#define FISR0_RFS 0x40
1053#define FISR0_T8MS 0x20
1054#define FISR0_ISF 0x20
1055#define FISR0_RMB 0x10
1056#define FISR0_CASC 0x08
1057#define FISR0_RSC 0x08
1058#define FISR0_CRC6 0x04
1059#define FISR0_CRC4 0x04
1060#define FISR0_PDEN 0x02
1061#define FISR0_RPF 0x01
1062
1063#define FISR1_CASE 0x80
1064#define FISR1_LLBSC 0x80
1065#define FISR1_RDO 0x40
1066#define FISR1_ALLS 0x20
1067#define FISR1_XDU 0x10
1068#define FISR1_XMB 0x08
1069#define FISR1_XLSC 0x02
1070#define FISR1_XPR 0x01
1071
1072#define FISR2_FAR 0x80
1073#define FISR2_LFA 0x40
1074#define FISR2_MFAR 0x20
1075#define FISR2_T400MS 0x10
1076#define FISR2_LMFA 0x10
1077#define FISR2_AIS 0x08
1078#define FISR2_LOS 0x04
1079#define FISR2_RAR 0x02
1080#define FISR2_RA 0x01
1081
1082#define FISR3_ES 0x80
1083#define FISR3_SEC 0x40
1084#define FISR3_LMFA16 0x20
1085#define FISR3_AIS16 0x10
1086#define FISR3_RA16 0x08
1087#define FISR3_API 0x04
1088#define FISR3_XSLP 0x20
1089#define FISR3_XSLN 0x10
1090#define FISR3_LLBSC 0x08
1091#define FISR3_XRS 0x04
1092#define FISR3_SLN 0x02
1093#define FISR3_SLP 0x01
1094
1095
1096/* GIS (Global Interrupt Status Register)
1097
1098 --------------------- E1 & T1 --------------------- */
1099
1100#define GIS_ISR3 0x08
1101#define GIS_ISR2 0x04
1102#define GIS_ISR1 0x02
1103#define GIS_ISR0 0x01
1104
1105
1106/* VSTR (Version Status Register)
1107
1108 --------------------- E1 & T1 --------------------- */
1109
1110#define VSTR_VN3 0x08
1111#define VSTR_VN2 0x04
1112#define VSTR_VN1 0x02
1113#define VSTR_VN0 0x01
1114
1115
1116/*>>>>>>>>>>>>>>>>>>>>> Local Control Structures <<<<<<<<<<<<<<<<<<<<<<<<< */
1117
1118/* Write-only Registers (E1/T1 control mode write registers) */
1119#define XFIFOH 0x00 /* Tx FIFO High Byte */
1120#define XFIFOL 0x01 /* Tx FIFO Low Byte */
1121#define CMDR 0x02 /* Command Reg */
1122#define DEC 0x60 /* Disable Error Counter */
1123#define TEST2 0x62 /* Manuf. Test Reg 2 */
1124#define XS(nbr) (0x70 + (nbr)) /* Tx CAS Reg (0 to 15) */
1125
1126/* Read-write Registers (E1/T1 status mode read registers) */
1127#define MODE 0x03 /* Mode Reg */
1128#define RAH1 0x04 /* Receive Address High 1 */
1129#define RAH2 0x05 /* Receive Address High 2 */
1130#define RAL1 0x06 /* Receive Address Low 1 */
1131#define RAL2 0x07 /* Receive Address Low 2 */
1132#define IPC 0x08 /* Interrupt Port Configuration */
1133#define CCR1 0x09 /* Common Configuration Reg 1 */
1134#define CCR3 0x0A /* Common Configuration Reg 3 */
1135#define PRE 0x0B /* Preamble Reg */
1136#define RTR1 0x0C /* Receive Timeslot Reg 1 */
1137#define RTR2 0x0D /* Receive Timeslot Reg 2 */
1138#define RTR3 0x0E /* Receive Timeslot Reg 3 */
1139#define RTR4 0x0F /* Receive Timeslot Reg 4 */
1140#define TTR1 0x10 /* Transmit Timeslot Reg 1 */
1141#define TTR2 0x11 /* Transmit Timeslot Reg 2 */
1142#define TTR3 0x12 /* Transmit Timeslot Reg 3 */
1143#define TTR4 0x13 /* Transmit Timeslot Reg 4 */
1144#define IMR0 0x14 /* Interrupt Mask Reg 0 */
1145#define IMR1 0x15 /* Interrupt Mask Reg 1 */
1146#define IMR2 0x16 /* Interrupt Mask Reg 2 */
1147#define IMR3 0x17 /* Interrupt Mask Reg 3 */
1148#define IMR4 0x18 /* Interrupt Mask Reg 4 */
1149#define IMR5 0x19 /* Interrupt Mask Reg 5 */
1150#define FMR0   0x1A    /* Framer Mode Register 0 */
1151#define FMR1   0x1B    /* Framer Mode Register 1 */
1152#define FMR2   0x1C    /* Framer Mode Register 2 */
1153#define LOOP 0x1D /* Channel Loop Back */
1154#define XSW 0x1E /* Transmit Service Word */
1155#define FMR4 0x1E /* Framer Mode Reg 4 */
1156#define XSP 0x1F /* Transmit Spare Bits */
1157#define FMR5 0x1F /* Framer Mode Reg 5 */
1158#define XC0 0x20 /* Transmit Control 0 */
1159#define XC1 0x21 /* Transmit Control 1 */
1160#define RC0 0x22 /* Receive Control 0 */
1161#define RC1 0x23 /* Receive Control 1 */
1162#define XPM0 0x24 /* Transmit Pulse Mask 0 */
1163#define XPM1 0x25 /* Transmit Pulse Mask 1 */
1164#define XPM2 0x26 /* Transmit Pulse Mask 2 */
1165#define TSWM 0x27 /* Transparent Service Word Mask */
1166#define TEST1 0x28 /* Manuf. Test Reg 1 */
1167#define IDLE 0x29 /* Idle Channel Code */
1168#define XSA4 0x2A /* Transmit SA4 Bit Reg */
1169#define XDL1   0x2A    /* Transmit DL-Bit Reg 1 */
1170#define XSA5   0x2B    /* Transmit SA5 Bit Reg */
1171#define XDL2   0x2B    /* Transmit DL-Bit Reg 2 */
1172#define XSA6   0x2C    /* Transmit SA6 Bit Reg */
1173#define XDL3   0x2C    /* Transmit DL-Bit Reg 3 */
1174#define XSA7   0x2D    /* Transmit SA7 Bit Reg */
1175#define CCB1   0x2D    /* Clear Channel Reg 1 */
1176#define XSA8   0x2E    /* Transmit SA8 Bit Reg */
1177#define CCB2 0x2E /* Clear Channel Reg 2 */
1178#define FMR3 0x2F /* Framer Mode Reg. 3 */
1179#define CCB3 0x2F /* Clear Channel Reg 3 */
1180#define ICB1 0x30 /* Idle Channel Reg 1 */
1181#define ICB2 0x31 /* Idle Channel Reg 2 */
1182#define ICB3 0x32 /* Idle Channel Reg 3 */
1183#define ICB4 0x33 /* Idle Channel Reg 4 */
1184#define LIM0 0x34 /* Line Interface Mode 0 */
1185#define LIM1 0x35 /* Line Interface Mode 1 */
1186#define PCDR 0x36 /* Pulse Count Detection */
1187#define PCRR 0x37 /* Pulse Count Recovery */
1188#define LIM2 0x38 /* Line Interface Mode Reg 2 */
1189#define LCR1 0x39 /* Loop Code Reg 1 */
1190#define LCR2 0x3A /* Loop Code Reg 2 */
1191#define LCR3 0x3B /* Loop Code Reg 3 */
1192#define SIC1 0x3C /* System Interface Control 1 */
1193
1194/* Read-only Registers (E1/T1 control mode read registers) */
1195#define RFIFOH 0x00 /* Receive FIFO */
1196#define RFIFOL 0x01 /* Receive FIFO */
1197#define FRS0 0x4C /* Framer Receive Status 0 */
1198#define FRS1 0x4D /* Framer Receive Status 1 */
1199#define RSW 0x4E /* Receive Service Word */
1200#define FRS2 0x4E /* Framer Receive Status 2 */
1201#define RSP 0x4F /* Receive Spare Bits */
1202#define FRS3 0x4F /* Framer Receive Status 3 */
1203#define FECL 0x50 /* Framing Error Counter */
1204#define FECH 0x51 /* Framing Error Counter */
1205#define CVCL 0x52 /* Code Violation Counter */
1206#define CVCH 0x53 /* Code Violation Counter */
1207#define CECL 0x54 /* CRC Error Counter 1 */
1208#define CECH 0x55 /* CRC Error Counter 1 */
1209#define EBCL 0x56 /* E-Bit Error Counter */
1210#define EBCH 0x57 /* E-Bit Error Counter */
1211#define BECL 0x58 /* Bit Error Counter Low */
1212#define BECH   0x59    /* Bit Error Counter High */
1213#define CEC3 0x5A /* CRC Error Counter 3 (16-bit) */
1214#define RSA4 0x5C /* Receive SA4 Bit Reg */
1215#define RDL1 0x5C /* Receive DL-Bit Reg 1 */
1216#define RSA5 0x5D /* Receive SA5 Bit Reg */
1217#define RDL2 0x5D /* Receive DL-Bit Reg 2 */
1218#define RSA6 0x5E /* Receive SA6 Bit Reg */
1219#define RDL3 0x5E /* Receive DL-Bit Reg 3 */
1220#define RSA7 0x5F /* Receive SA7 Bit Reg */
1221#define RSA8 0x60 /* Receive SA8 Bit Reg */
1222#define RSA6S 0x61 /* Receive SA6 Bit Status Reg */
1223#define TSR0 0x62 /* Manuf. Test Reg 0 */
1224#define TSR1 0x63 /* Manuf. Test Reg 1 */
1225#define SIS 0x64 /* Signaling Status Reg */
1226#define RSIS 0x65 /* Receive Signaling Status Reg */
1227#define RBCL 0x66 /* Receive Byte Control */
1228#define RBCH 0x67 /* Receive Byte Control */
1229#define FISR0 0x68 /* Interrupt Status Reg 0 */
1230#define FISR1 0x69 /* Interrupt Status Reg 1 */
1231#define FISR2 0x6A /* Interrupt Status Reg 2 */
1232#define FISR3 0x6B /* Interrupt Status Reg 3 */
1233#define GIS 0x6E /* Global Interrupt Status */
1234#define VSTR 0x6F /* Version Status */
1235#define RS(nbr) (0x70 + (nbr)) /* Rx CAS Reg (0 to 15) */
1236
1237#endif /* _FALC_LH_H */
1238
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
new file mode 100644
index 000000000000..73401b0f0151
--- /dev/null
+++ b/drivers/net/wan/pc300.h
@@ -0,0 +1,497 @@
1/*
2 * pc300.h Cyclades-PC300(tm) Kernel API Definitions.
3 *
4 * Author: Ivan Passos <ivan@cyclades.com>
5 *
6 * Copyright: (c) 1999-2002 Cyclades Corp.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * $Log: pc300.h,v $
14 * Revision 3.12 2002/03/07 14:17:09 henrique
15 * License data fixed
16 *
17 * Revision 3.11 2002/01/28 21:09:39 daniela
18 * Included ';' after pc300hw.bus.
19 *
20 * Revision 3.10 2002/01/17 17:58:52 ivan
21 * Support for PC300-TE/M (PMC).
22 *
23 * Revision 3.9 2001/09/28 13:30:53 daniela
24 * Renamed dma_start routine to rx_dma_start.
25 *
26 * Revision 3.8 2001/09/24 13:03:45 daniela
27 * Fixed BOF interrupt treatment. Created dma_start routine.
28 *
29 * Revision 3.7 2001/08/10 17:19:58 daniela
30 * Fixed IOCTLs defines.
31 *
32 * Revision 3.6 2001/07/18 19:24:42 daniela
33 * Included kernel version.
34 *
35 * Revision 3.5 2001/07/05 18:38:08 daniela
36 * DMA transmission bug fix.
37 *
38 * Revision 3.4 2001/06/26 17:10:40 daniela
39 * New configuration parameters (line code, CRC calculation and clock).
40 *
41 * Revision 3.3 2001/06/22 13:13:02 regina
42 * MLPPP implementation
43 *
44 * Revision 3.2 2001/06/18 17:56:09 daniela
45 * Increased DEF_MTU and TX_QUEUE_LEN.
46 *
47 * Revision 3.1 2001/06/15 12:41:10 regina
48 * upping major version number
49 *
50 * Revision 1.1.1.1 2001/06/13 20:25:06 daniela
51 * PC300 initial CVS version (3.4.0-pre1)
52 *
53 * Revision 2.3 2001/03/05 daniela
54 * Created struct pc300conf, to provide the hardware information to pc300util.
55 * Inclusion of 'alloc_ramsize' field on structure 'pc300hw'.
56 *
57 * Revision 2.2 2000/12/22 daniela
58 * Structures and defines to support pc300util: statistics, status,
59 * loopback tests, trace.
60 *
61 * Revision 2.1 2000/09/28 ivan
62 * Inclusion of 'iophys' and 'iosize' fields on structure 'pc300hw', to
63 * allow release of I/O region at module unload.
64 * Changed location of include files.
65 *
66 * Revision 2.0 2000/03/27 ivan
67 * Added support for the PC300/TE cards.
68 *
69 * Revision 1.1 2000/01/31 ivan
70 * Replaced 'pc300[drv|sca].h' former PC300 driver include files.
71 *
72 * Revision 1.0 1999/12/16 ivan
73 * First official release.
74 * Inclusion of 'nchan' field on structure 'pc300hw', to allow variable
75 * number of ports per card.
76 * Inclusion of 'if_ptr' field on structure 'pc300dev'.
77 *
78 * Revision 0.6 1999/11/17 ivan
79 * Changed X.25-specific function names to comply with adopted convention.
80 *
81 * Revision 0.5 1999/11/16 Daniela Squassoni
82 * X.25 support.
83 *
84 * Revision 0.4 1999/11/15 ivan
85 * Inclusion of 'clock' field on structure 'pc300hw'.
86 *
87 * Revision 0.3 1999/11/10 ivan
88 * IOCTL name changing.
89 * Inclusion of driver function prototypes.
90 *
91 * Revision 0.2 1999/11/03 ivan
92 * Inclusion of 'tx_skb' and union 'ifu' on structure 'pc300dev'.
93 *
94 * Revision 0.1 1999/01/15 ivan
95 * Initial version.
96 *
97 */
98
99#ifndef _PC300_H
100#define _PC300_H
101
102#include <linux/hdlc.h>
103#include "hd64572.h"
104#include "pc300-falc-lh.h"
105
106#ifndef CY_TYPES
107#define CY_TYPES
108typedef __u64 ucdouble; /* 64 bits, unsigned */
109typedef __u32 uclong; /* 32 bits, unsigned */
110typedef __u16 ucshort; /* 16 bits, unsigned */
111typedef __u8 ucchar; /* 8 bits, unsigned */
112#endif /* CY_TYPES */
113
114#define PC300_PROTO_MLPPP 1
115
116#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */
117
118#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */
119#define PC300_MAXINDEX 100 /* Max dev. name index (the '0' in hdlc0) */
120
121#define PC300_MAXCARDS 4 /* Max number of cards per system */
122#define PC300_MAXCHAN 2 /* Number of channels per card */
123
124#define PC300_PLX_WIN   0x80    /* PLX control window size (128 bytes) */
125#define PC300_RAMSIZE   0x40000 /* RAM window size (256 KB) */
126#define PC300_SCASIZE   0x400   /* SCA window size (1 KB) */
127#define PC300_FALCSIZE  0x400   /* FALC window size (1 KB) */
128
129#define PC300_OSC_CLOCK 24576000
130#define PC300_PCI_CLOCK 33000000
131
132#define BD_DEF_LEN 0x0800 /* DMA buffer length (2KB) */
133#define DMA_TX_MEMSZ 0x8000 /* Total DMA Tx memory size (32KB/ch) */
134#define DMA_RX_MEMSZ 0x10000 /* Total DMA Rx memory size (64KB/ch) */
135
136#define N_DMA_TX_BUF (DMA_TX_MEMSZ / BD_DEF_LEN) /* DMA Tx buffers */
137#define N_DMA_RX_BUF (DMA_RX_MEMSZ / BD_DEF_LEN) /* DMA Rx buffers */
138
139/* DMA Buffer Offsets */
140#define DMA_TX_BASE ((N_DMA_TX_BUF + N_DMA_RX_BUF) * \
141 PC300_MAXCHAN * sizeof(pcsca_bd_t))
142#define DMA_RX_BASE (DMA_TX_BASE + PC300_MAXCHAN*DMA_TX_MEMSZ)
143
144/* DMA Descriptor Offsets */
145#define DMA_TX_BD_BASE 0x0000
146#define DMA_RX_BD_BASE (DMA_TX_BD_BASE + ((PC300_MAXCHAN*DMA_TX_MEMSZ / \
147 BD_DEF_LEN) * sizeof(pcsca_bd_t)))
148
149/* DMA Descriptor Macros */
150#define TX_BD_ADDR(chan, n) (DMA_TX_BD_BASE + \
151 ((N_DMA_TX_BUF*chan) + n) * sizeof(pcsca_bd_t))
152#define RX_BD_ADDR(chan, n) (DMA_RX_BD_BASE + \
153 ((N_DMA_RX_BUF*chan) + n) * sizeof(pcsca_bd_t))
154
155/* Macro to access the FALC registers (TE only) */
156#define F_REG(reg, chan) (0x200*(chan) + ((reg)<<2))
157
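/*
 * Example: reading FMR0 (0x1A) of FALC channel 1 through this mapping gives
 *	F_REG(FMR0, 1) = 0x200*1 + (0x1A << 2) = 0x268
 * so the access would look like cpc_readb(falcbase + F_REG(FMR0, 1)),
 * where falcbase is the channel's FALC window base address.
 */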
158/***************************************
159 * Memory access functions/macros *
160 * (required to support Alpha systems) *
161 ***************************************/
162#ifdef __KERNEL__
163#define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();}
164#define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();}
165#define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();}
166
167#define cpc_readb(port) readb(port)
168#define cpc_readw(port) readw(port)
169#define cpc_readl(port) readl(port)
170
171#else /* __KERNEL__ */
172#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val))
173#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val))
174#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val))
175
176#define cpc_readb(port) (*(volatile ucchar *)(port))
177#define cpc_readw(port) (*(volatile ucshort *)(port))
178#define cpc_readl(port) (*(volatile uclong *)(port))
179
180#endif /* __KERNEL__ */
181
182/****** Data Structures *****************************************************/
183
184/*
185 * RUNTIME_9050 - PLX PCI9050-1 local configuration and shared runtime
186 * registers. This structure can be used to access the 9050 registers
187 * (memory mapped).
188 */
189struct RUNTIME_9050 {
190 uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
191 uclong loc_rom_range; /* 10h : Local ROM Range */
192 uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
193 uclong loc_rom_base; /* 24h : Local ROM Base */
194 uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
195 uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */
196 uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
197 uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
198 uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
199};
200
201#define PLX_9050_LINT1_ENABLE 0x01
202#define PLX_9050_LINT1_POL 0x02
203#define PLX_9050_LINT1_STATUS 0x04
204#define PLX_9050_LINT2_ENABLE 0x08
205#define PLX_9050_LINT2_POL 0x10
206#define PLX_9050_LINT2_STATUS 0x20
207#define PLX_9050_INTR_ENABLE 0x40
208#define PLX_9050_SW_INTR 0x80
209
210/* Masks to access the init_ctrl PLX register */
211#define PC300_CLKSEL_MASK (0x00000004UL)
212#define PC300_CHMEDIA_MASK(chan) (0x00000020UL<<(chan*3))
213#define PC300_CTYPE_MASK (0x00000800UL)
214
215/* CPLD Registers (base addr = falcbase, TE only) */
216/* CPLD v. 0 */
217#define CPLD_REG1 0x140 /* Chip resets, DCD/CTS status */
218#define CPLD_REG2 0x144 /* Clock enable, LED control */
219/* CPLD v. 2 or higher */
220#define CPLD_V2_REG1 0x100 /* Chip resets, DCD/CTS status */
221#define CPLD_V2_REG2 0x104 /* Clock enable, LED control */
222#define CPLD_ID_REG 0x108 /* CPLD version */
223
224/* CPLD Register bit description: for the FALC bits, they should always be
225 set based on the channel (use (bit<<(2*ch)) to access the correct bit for
226 that channel) */
227#define CPLD_REG1_FALC_RESET 0x01
228#define CPLD_REG1_SCA_RESET 0x02
229#define CPLD_REG1_GLOBAL_CLK 0x08
230#define CPLD_REG1_FALC_DCD 0x10
231#define CPLD_REG1_FALC_CTS 0x20
232
233#define CPLD_REG2_FALC_TX_CLK 0x01
234#define CPLD_REG2_FALC_RX_CLK 0x02
235#define CPLD_REG2_FALC_LED1 0x10
236#define CPLD_REG2_FALC_LED2 0x20
237
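/*
 * Example (illustration only): CPLD_REG1_FALC_DCD is 0x10 for channel 0
 * and must be shifted to 0x40 for channel 1, i.e.
 * (CPLD_REG1_FALC_DCD << (2 * ch)). falc_enable_comm() and
 * falc_disable_comm() in pc300_drv.c use exactly this pattern to drive
 * DCD/CTS, and te_config() does the same with CPLD_REG1_FALC_RESET to
 * reset one FALC at a time.
 */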
238/* Structure with FALC-related fields (TE only) */
239#define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */
240
241typedef struct falc {
242 ucchar sync; /* If true FALC is synchronized */
243 ucchar active; /* if TRUE then already active */
244 ucchar loop_active; /* if TRUE a line loopback UP was received */
245 ucchar loop_gen; /* if TRUE a line loopback UP was issued */
246
247 ucchar num_channels;
248 ucchar offset; /* 1 for T1, 0 for E1 */
249 ucchar full_bandwidth;
250
251 ucchar xmb_cause;
252 ucchar multiframe_mode;
253
254 /* Statistics */
255 ucshort pden; /* Pulse Density violation count */
256 ucshort los; /* Loss of Signal count */
257 ucshort losr; /* Loss of Signal recovery count */
258 ucshort lfa; /* Loss of frame alignment count */
259 ucshort farec; /* Frame Alignment Recovery count */
260 ucshort lmfa; /* Loss of multiframe alignment count */
261 ucshort ais; /* Alarm Indication Signal (AIS) count */
262 ucshort sec; /* One-second timer */
263 ucshort es; /* Errored second */
264 ucshort rai; /* remote alarm received */
265 ucshort bec; /* Bit errors (BECL/BECH, PRBS mode) */
266 ucshort fec; /* Framing errors (FECL/FECH) */
267 ucshort cvc; /* Code violations (CVCL/CVCH) */
268 ucshort cec; /* CRC errors (CECL/CECH) */
269 ucshort ebc; /* E-bit errors (EBCL/EBCH) */
270
271 /* Status */
272 ucchar red_alarm;
273 ucchar blue_alarm;
274 ucchar loss_fa;
275 ucchar yellow_alarm;
276 ucchar loss_mfa;
277 ucchar prbs;
278} falc_t;
279
280typedef struct falc_status {
281 ucchar sync; /* If true FALC is synchronized */
282 ucchar red_alarm;
283 ucchar blue_alarm;
284 ucchar loss_fa;
285 ucchar yellow_alarm;
286 ucchar loss_mfa;
287 ucchar prbs;
288} falc_status_t;
289
290typedef struct rsv_x21_status {
291 ucchar dcd;
292 ucchar dsr;
293 ucchar cts;
294 ucchar rts;
295 ucchar dtr;
296} rsv_x21_status_t;
297
298typedef struct pc300stats {
299 int hw_type;
300 uclong line_on;
301 uclong line_off;
302 struct net_device_stats gen_stats;
303 falc_t te_stats;
304} pc300stats_t;
305
306typedef struct pc300status {
307 int hw_type;
308 rsv_x21_status_t gen_status;
309 falc_status_t te_status;
310} pc300status_t;
311
312typedef struct pc300loopback {
313 char loop_type;
314 char loop_on;
315} pc300loopback_t;
316
317typedef struct pc300patterntst {
318 char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */
319 ucshort num_errors;
320} pc300patterntst_t;
321
322typedef struct pc300dev {
323 void *if_ptr; /* General purpose pointer */
324 struct pc300ch *chan;
325 ucchar trace_on;
326 uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */
327 uclong line_off;
328#ifdef __KERNEL__
329 char name[16];
330 struct net_device *dev;
331
332 void *private;
333 struct sk_buff *tx_skb;
334 union { /* This union has all the protocol-specific structures */
335 struct ppp_device pppdev;
336 }ifu;
337#ifdef CONFIG_PC300_MLPPP
338 void *cpc_tty; /* information to PC300 TTY driver */
339#endif
340#endif /* __KERNEL__ */
341}pc300dev_t;
342
343typedef struct pc300hw {
344 int type; /* RSV, X21, etc. */
345 int bus; /* Bus (PCI, PMC, etc.) */
346 int nchan; /* number of channels */
347 int irq; /* interrupt request level */
348 uclong clock; /* Board clock */
349 ucchar cpld_id; /* CPLD ID (TE only) */
350 ucshort cpld_reg1; /* CPLD reg 1 (TE only) */
351 ucshort cpld_reg2; /* CPLD reg 2 (TE only) */
352 ucshort gpioc_reg; /* PLX GPIOC reg */
353 ucshort intctl_reg; /* PLX Int Ctrl/Status reg */
354 uclong iophys; /* PLX registers I/O base */
355 uclong iosize; /* PLX registers I/O size */
356 uclong plxphys; /* PLX registers MMIO base (physical) */
357 void __iomem * plxbase; /* PLX registers MMIO base (virtual) */
358 uclong plxsize; /* PLX registers MMIO size */
359 uclong scaphys; /* SCA registers MMIO base (physical) */
360 void __iomem * scabase; /* SCA registers MMIO base (virtual) */
361 uclong scasize; /* SCA registers MMIO size */
362 uclong ramphys; /* On-board RAM MMIO base (physical) */
363 void __iomem * rambase; /* On-board RAM MMIO base (virtual) */
364 uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */
365 uclong ramsize; /* On-board RAM MMIO size */
366 uclong falcphys; /* FALC registers MMIO base (physical) */
367 void __iomem * falcbase;/* FALC registers MMIO base (virtual) */
368 uclong falcsize; /* FALC registers MMIO size */
369} pc300hw_t;
370
371typedef struct pc300chconf {
372 sync_serial_settings phys_settings; /* Clock type/rate (in bps),
373 loopback mode */
374 raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */
375 uclong media; /* HW media (RS232, V.35, etc.) */
376 uclong proto; /* Protocol (PPP, X.25, etc.) */
377 ucchar monitor; /* Monitor mode (0 = off, !0 = on) */
378
379 /* TE-specific parameters */
380 ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */
381 ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */
382 ucchar lbo; /* Line Build Out */
383 ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */
384 uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */
385} pc300chconf_t;
386
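/*
 * Example (illustration only): te_config() in pc300_drv.c treats
 * tslot_bitmap == 0xffffffff as "full bandwidth" and opens every
 * timeslot; any other value selects a fractional configuration whose
 * individual slots are opened or closed one by one in
 * falc_init_timeslot().
 */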
387typedef struct pc300ch {
388 struct pc300 *card;
389 int channel;
390 pc300dev_t d;
391 pc300chconf_t conf;
392 ucchar tx_first_bd; /* First TX DMA block descr. w/ data */
393 ucchar tx_next_bd; /* Next free TX DMA block descriptor */
394 ucchar rx_first_bd; /* First free RX DMA block descriptor */
395 ucchar rx_last_bd; /* Last free RX DMA block descriptor */
396 ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */
397 falc_t falc; /* FALC structure (TE only) */
398} pc300ch_t;
399
400typedef struct pc300 {
401 pc300hw_t hw; /* hardware config. */
402 pc300ch_t chan[PC300_MAXCHAN];
403#ifdef __KERNEL__
404 spinlock_t card_lock;
405#endif /* __KERNEL__ */
406} pc300_t;
407
408typedef struct pc300conf {
409 pc300hw_t hw;
410 pc300chconf_t conf;
411} pc300conf_t;
412
413/* DEV ioctl() commands */
414#define N_SPPP_IOCTLS 2
415
416enum pc300_ioctl_cmds {
417 SIOCCPCRESERVED = (SIOCDEVPRIVATE + N_SPPP_IOCTLS),
418 SIOCGPC300CONF,
419 SIOCSPC300CONF,
420 SIOCGPC300STATUS,
421 SIOCGPC300FALCSTATUS,
422 SIOCGPC300UTILSTATS,
423 SIOCGPC300UTILSTATUS,
424 SIOCSPC300TRACE,
425 SIOCSPC300LOOPBACK,
426 SIOCSPC300PATTERNTEST,
427};
428
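/*
 * Minimal userspace sketch (illustration only; the authoritative
 * argument marshalling is in cpc_ioctl() in pc300_drv.c, and the
 * assumption here is that the request structure travels through
 * ifr_data, as the pc300util companion tool does):
 *
 *	pc300status_t status;
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "hdlc0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&status;
 *	if (ioctl(fd, SIOCGPC300STATUS, &ifr) == 0)
 *		printf("hw_type=%d\n", status.hw_type);
 *
 * where fd is any open socket descriptor.
 */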
429/* Loopback types - PC300/TE boards */
430enum pc300_loopback_cmds {
431 PC300LOCLOOP = 1,
432 PC300REMLOOP,
433 PC300PAYLOADLOOP,
434 PC300GENLOOPUP,
435 PC300GENLOOPDOWN,
436};
437
438/* Control Constant Definitions */
439#define PC300_RSV 0x01
440#define PC300_X21 0x02
441#define PC300_TE 0x03
442
443#define PC300_PCI 0x00
444#define PC300_PMC 0x01
445
446#define PC300_LC_AMI 0x01
447#define PC300_LC_B8ZS 0x02
448#define PC300_LC_NRZ 0x03
449#define PC300_LC_HDB3 0x04
450
451/* Framing (T1) */
452#define PC300_FR_ESF 0x01
453#define PC300_FR_D4 0x02
454#define PC300_FR_ESF_JAPAN 0x03
455
456/* Framing (E1) */
457#define PC300_FR_MF_CRC4 0x04
458#define PC300_FR_MF_NON_CRC4 0x05
459#define PC300_FR_UNFRAMED 0x06
460
461#define PC300_LBO_0_DB 0x00
462#define PC300_LBO_7_5_DB 0x01
463#define PC300_LBO_15_DB 0x02
464#define PC300_LBO_22_5_DB 0x03
465
466#define PC300_RX_SENS_SH 0x01
467#define PC300_RX_SENS_LH 0x02
468
469#define PC300_TX_TIMEOUT (2*HZ)
470#define PC300_TX_QUEUE_LEN 100
471#define PC300_DEF_MTU 1600
472
473#ifdef __KERNEL__
474/* Function Prototypes */
475int dma_buf_write(pc300_t *, int, ucchar *, int);
476int dma_buf_read(pc300_t *, int, struct sk_buff *);
477void tx_dma_start(pc300_t *, int);
478void rx_dma_start(pc300_t *, int);
479void tx_dma_stop(pc300_t *, int);
480void rx_dma_stop(pc300_t *, int);
481int cpc_queue_xmit(struct sk_buff *, struct net_device *);
482void cpc_net_rx(struct net_device *);
483void cpc_sca_status(pc300_t *, int);
484int cpc_change_mtu(struct net_device *, int);
485int cpc_ioctl(struct net_device *, struct ifreq *, int);
486int ch_config(pc300dev_t *);
487int rx_config(pc300dev_t *);
488int tx_config(pc300dev_t *);
489void cpc_opench(pc300dev_t *);
490void cpc_closech(pc300dev_t *);
491int cpc_open(struct net_device *dev);
492int cpc_close(struct net_device *dev);
493int cpc_set_media(hdlc_device *, int);
494#endif /* __KERNEL__ */
495
496#endif /* _PC300_H */
497
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
new file mode 100644
index 000000000000..d67be2587d4d
--- /dev/null
+++ b/drivers/net/wan/pc300_drv.c
@@ -0,0 +1,3692 @@
1#define USE_PCI_CLOCK
2static char rcsid[] =
3"Revision: 3.4.5 Date: 2002/03/07 ";
4
5/*
6 * pc300.c Cyclades-PC300(tm) Driver.
7 *
8 * Author: Ivan Passos <ivan@cyclades.com>
9 * Maintainer: PC300 Maintainer <pc300@cyclades.com>
10 *
11 * Copyright: (c) 1999-2003 Cyclades Corp.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 *
18 * Using tabstop = 4.
19 *
20 * $Log: pc300_drv.c,v $
21 * Revision 3.23 2002/03/20 13:58:40 henrique
22 * Fixed orthographic mistakes
23 *
24 * Revision 3.22 2002/03/13 16:56:56 henrique
25 * Take out the debug messages
26 *
27 * Revision 3.21 2002/03/07 14:17:09 henrique
28 * License data fixed
29 *
30 * Revision 3.20 2002/01/17 17:58:52 ivan
31 * Support for PC300-TE/M (PMC).
32 *
33 * Revision 3.19 2002/01/03 17:08:47 daniela
34 * Enables DMA reception when the SCA-II disables it improperly.
35 *
36 * Revision 3.18 2001/12/03 18:47:50 daniela
37 * Esthetic changes.
38 *
39 * Revision 3.17 2001/10/19 16:50:13 henrique
40 * Patch to kernel 2.4.12 and new generic hdlc.
41 *
42 * Revision 3.16 2001/10/16 15:12:31 regina
43 * clear statistics
44 *
45 * Revision 3.11 to 3.15 2001/10/11 20:26:04 daniela
46 * More DMA fixes for noisy lines.
47 * Return the size of bad frames in dma_get_rx_frame_size, so that the Rx buffer
48 * descriptors can be cleaned by dma_buf_read (called in cpc_net_rx).
49 * Renamed dma_start routine to rx_dma_start. Improved Rx statistics.
50 * Fixed BOF interrupt treatment. Created dma_start routine.
51 * Changed min and max to cpc_min and cpc_max.
52 *
53 * Revision 3.10 2001/08/06 12:01:51 regina
54 * Fixed problem in DSR_DE bit.
55 *
56 * Revision 3.9 2001/07/18 19:27:26 daniela
57 * Added some history comments.
58 *
59 * Revision 3.8 2001/07/12 13:11:19 regina
60 * bug fix - DCD-OFF in pc300 tty driver
61 *
62 * Revision 3.3 to 3.7 2001/07/06 15:00:20 daniela
63 * Removing kernel 2.4.3 and previous support.
64 * DMA transmission bug fix.
65 * MTU check in cpc_net_rx fixed.
66 * Boot messages reviewed.
67 * New configuration parameters (line code, CRC calculation and clock).
68 *
69 * Revision 3.2 2001/06/22 13:13:02 regina
70 * MLPPP implementation. Changed the header of message trace to include
71 * the device name. New format : "hdlcX[R/T]: ".
72 * Default configuration changed.
73 *
74 * Revision 3.1 2001/06/15 regina
75 * in cpc_queue_xmit, netif_stop_queue is called if there is no free descriptor
76 * upping major version number
77 *
78 * Revision 1.1.1.1 2001/06/13 20:25:04 daniela
79 * PC300 initial CVS version (3.4.0-pre1)
80 *
81 * Revision 3.0.1.2 2001/06/08 daniela
82 * Did some changes in the DMA programming implementation to avoid the
83 * occurrence of a SCA-II bug when CDA is accessed during a DMA transfer.
84 *
85 * Revision 3.0.1.1 2001/05/02 daniela
86 * Added kernel 2.4.3 support.
87 *
88 * Revision 3.0.1.0 2001/03/13 daniela, henrique
89 * Added Frame Relay Support.
90 * Driver now uses HDLC generic driver to provide protocol support.
91 *
92 * Revision 3.0.0.8 2001/03/02 daniela
93 * Fixed ram size detection.
94 * Changed SIOCGPC300CONF ioctl, to give hw information to pc300util.
95 *
96 * Revision 3.0.0.7 2001/02/23 daniela
97 * netif_stop_queue called before the SCA-II transmission commands in
98 * cpc_queue_xmit, and with interrupts disabled to avoid race conditions with
99 * transmission interrupts.
100 * Fixed falc_check_status for Unframed E1.
101 *
102 * Revision 3.0.0.6 2000/12/13 daniela
103 * Implemented pc300util support: trace, statistics, status and loopback
104 * tests for the PC300 TE boards.
105 *
106 * Revision 3.0.0.5 2000/12/12 ivan
107 * Added support for Unframed E1.
108 * Implemented monitor mode.
109 * Fixed DCD sensitivity on the second channel.
110 * Driver now complies with new PCI kernel architecture.
111 *
112 * Revision 3.0.0.4 2000/09/28 ivan
113 * Implemented DCD sensitivity.
114 * Moved hardware-specific open to the end of cpc_open, to avoid race
115 * conditions with early reception interrupts.
116 * Included code for [request|release]_mem_region().
117 * Changed location of pc300.h .
118 * Minor code revision (contrib. of Jeff Garzik).
119 *
120 * Revision 3.0.0.3 2000/07/03 ivan
121 * Previous bugfix for the framing errors with external clock made X21
122 * boards stop working. This version fixes it.
123 *
124 * Revision 3.0.0.2 2000/06/23 ivan
125 * Revisited cpc_queue_xmit to prevent race conditions on Tx DMA buffer
126 * handling when Tx timeouts occur.
127 * Revisited Rx statistics.
128 * Fixed a bug in the SCA-II programming that would cause framing errors
129 * when external clock was configured.
130 *
131 * Revision 3.0.0.1 2000/05/26 ivan
132 * Added logic in the SCA interrupt handler so that no board can monopolize
133 * the driver.
134 * Request PLX I/O region, although driver doesn't use it, to avoid
135 * problems with other drivers accessing it.
136 *
137 * Revision 3.0.0.0 2000/05/15 ivan
138 * Did some changes in the DMA programming implementation to avoid the
139 * occurrence of a SCA-II bug in the second channel.
140 * Implemented workaround for PLX9050 bug that would cause a system lockup
141 * in certain systems, depending on the MMIO addresses allocated to the
142 * board.
143 * Fixed the FALC chip programming to avoid synchronization problems in the
144 * second channel (TE only).
145 * Implemented a cleaner and faster Tx DMA descriptor cleanup procedure in
146 * cpc_queue_xmit().
147 * Changed the built-in driver implementation so that the driver can use the
148 * general 'hdlcN' naming convention instead of proprietary device names.
149 * Driver load messages are now device-centric, instead of board-centric.
150 * Dynamic allocation of net_device structures.
151 * Code is now compliant with the new module interface (module_[init|exit]).
152 * Make use of the PCI helper functions to access PCI resources.
153 *
154 * Revision 2.0.0.0 2000/04/15 ivan
155 * Added support for the PC300/TE boards (T1/FT1/E1/FE1).
156 *
157 * Revision 1.1.0.0 2000/02/28 ivan
158 * Major changes in the driver architecture.
159 * Softnet compliancy implemented.
160 * Driver now reports physical instead of virtual memory addresses.
161 * Added cpc_change_mtu function.
162 *
163 * Revision 1.0.0.0 1999/12/16 ivan
164 * First official release.
165 * Support for 1- and 2-channel boards (which use distinct PCI Device IDs).
166 * Support for monolithic installation (i.e., drv built into the kernel).
167 * X.25 additional checking when lapb_[dis]connect_request returns an error.
168 * SCA programming now covers X.21 as well.
169 *
170 * Revision 0.3.1.0 1999/11/18 ivan
171 * Made X.25 support configuration-dependent (as it depends on external
172 * modules to work).
173 * Changed X.25-specific function names to comply with adopted convention.
174 * Fixed typos in X.25 functions that would cause compile errors (Daniela).
175 * Fixed bug in ch_config that would disable interrupts on a previously
176 * enabled channel if the other channel on the same board was enabled later.
177 *
178 * Revision 0.3.0.0 1999/11/16 daniela
179 * X.25 support.
180 *
181 * Revision 0.2.3.0 1999/11/15 ivan
182 * Function cpc_ch_status now provides more detailed information.
183 * Added support for X.21 clock configuration.
184 * Changed TNR1 setting in order to prevent Tx FIFO overaccesses by the SCA.
185 * Now using PCI clock instead of internal oscillator clock for the SCA.
186 *
187 * Revision 0.2.2.0 1999/11/10 ivan
188 * Changed the *_dma_buf_check functions so that they would print only
189 * the useful info instead of the whole buffer descriptor bank.
190 * Fixed bug in cpc_queue_xmit that would eventually crash the system
191 * in case of a packet drop.
192 * Implemented TX underrun handling.
193 * Improved SCA fine tuning to boost up its performance.
194 *
195 * Revision 0.2.1.0 1999/11/03 ivan
196 * Added functions *dma_buf_pt_init to allow independent initialization
197 * of the next-descr. and DMA buffer pointers on the DMA descriptors.
198 * Kernel buffer release and tbusy clearing is now done in the interrupt
199 * handler.
200 * Fixed bug in cpc_open that would cause an interface reopen to fail.
201 * Added a protocol-specific code section in cpc_net_rx.
202 * Removed printk level defs (they might be added back after the beta phase).
203 *
204 * Revision 0.2.0.0 1999/10/28 ivan
205 * Revisited the code so that new protocols can be easily added / supported.
206 *
207 * Revision 0.1.0.1 1999/10/20 ivan
208 * Mostly "esthetic" changes.
209 *
210 * Revision 0.1.0.0 1999/10/11 ivan
211 * Initial version.
212 *
213 */
214
215#include <linux/module.h>
216#include <linux/kernel.h>
217#include <linux/mm.h>
218#include <linux/ioport.h>
219#include <linux/pci.h>
220#include <linux/errno.h>
221#include <linux/string.h>
222#include <linux/init.h>
223#include <linux/delay.h>
224#include <linux/net.h>
225#include <linux/skbuff.h>
226#include <linux/if_arp.h>
227#include <linux/netdevice.h>
228#include <linux/spinlock.h>
229#include <linux/if.h>
230
231#include <net/syncppp.h>
232#include <net/arp.h>
233
234#include <asm/io.h>
235#include <asm/uaccess.h>
236
237#include "pc300.h"
238
239#define CPC_LOCK(card,flags) \
240 do { \
241 spin_lock_irqsave(&card->card_lock, flags); \
242 } while (0)
243
244#define CPC_UNLOCK(card,flags) \
245 do { \
246 spin_unlock_irqrestore(&card->card_lock, flags); \
247 } while (0)
248
249#undef PC300_DEBUG_PCI
250#undef PC300_DEBUG_INTR
251#undef PC300_DEBUG_TX
252#undef PC300_DEBUG_RX
253#undef PC300_DEBUG_OTHER
254
255static struct pci_device_id cpc_pci_dev_id[] __devinitdata = {
256 /* PC300/RSV or PC300/X21, 2 chan */
257 {0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
258 /* PC300/RSV or PC300/X21, 1 chan */
259 {0x120e, 0x301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x301},
260 /* PC300/TE, 2 chan */
261 {0x120e, 0x310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x310},
262 /* PC300/TE, 1 chan */
263 {0x120e, 0x311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x311},
264 /* PC300/TE-M, 2 chan */
265 {0x120e, 0x320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x320},
266 /* PC300/TE-M, 1 chan */
267 {0x120e, 0x321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x321},
268 /* End of table */
269 {0,},
270};
271MODULE_DEVICE_TABLE(pci, cpc_pci_dev_id);
272
273#ifndef cpc_min
274#define cpc_min(a,b) (((a)<(b))?(a):(b))
275#endif
276#ifndef cpc_max
277#define cpc_max(a,b) (((a)>(b))?(a):(b))
278#endif
279
280/* prototypes */
281static void tx_dma_buf_pt_init(pc300_t *, int);
282static void tx_dma_buf_init(pc300_t *, int);
283static void rx_dma_buf_pt_init(pc300_t *, int);
284static void rx_dma_buf_init(pc300_t *, int);
285static void tx_dma_buf_check(pc300_t *, int);
286static void rx_dma_buf_check(pc300_t *, int);
287static irqreturn_t cpc_intr(int, void *, struct pt_regs *);
288static struct net_device_stats *cpc_get_stats(struct net_device *);
289static int clock_rate_calc(uclong, uclong, int *);
290static uclong detect_ram(pc300_t *);
291static void plx_init(pc300_t *);
292static void cpc_trace(struct net_device *, struct sk_buff *, char);
293static int cpc_attach(struct net_device *, unsigned short, unsigned short);
294
295#ifdef CONFIG_PC300_MLPPP
296void cpc_tty_init(pc300dev_t * dev);
297void cpc_tty_unregister_service(pc300dev_t * pc300dev);
298void cpc_tty_receive(pc300dev_t * pc300dev);
299void cpc_tty_trigger_poll(pc300dev_t * pc300dev);
300void cpc_tty_reset_var(void);
301#endif
302
303/************************/
304/*** DMA Routines ***/
305/************************/
306static void tx_dma_buf_pt_init(pc300_t * card, int ch)
307{
308 int i;
309 int ch_factor = ch * N_DMA_TX_BUF;
310 volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
311 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
312
313 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
314 cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE +
315 (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t)));
316 cpc_writel(&ptdescr->ptbuf,
317 (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN));
318 }
319}
320
321static void tx_dma_buf_init(pc300_t * card, int ch)
322{
323 int i;
324 int ch_factor = ch * N_DMA_TX_BUF;
325 volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
326 + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
327
328 for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) {
329 memset_io(ptdescr, 0, sizeof(pcsca_bd_t));
330 cpc_writew(&ptdescr->len, 0);
331 cpc_writeb(&ptdescr->status, DST_OSB);
332 }
333 tx_dma_buf_pt_init(card, ch);
334}
335
336static void rx_dma_buf_pt_init(pc300_t * card, int ch)
337{
338 int i;
339 int ch_factor = ch * N_DMA_RX_BUF;
340 volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
341 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
342
343 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
344 cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE +
345 (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t)));
346 cpc_writel(&ptdescr->ptbuf,
347 (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN));
348 }
349}
350
351static void rx_dma_buf_init(pc300_t * card, int ch)
352{
353 int i;
354 int ch_factor = ch * N_DMA_RX_BUF;
355 volatile pcsca_bd_t __iomem *ptdescr = (card->hw.rambase
356 + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
357
358 for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) {
359 memset_io(ptdescr, 0, sizeof(pcsca_bd_t));
360 cpc_writew(&ptdescr->len, 0);
361 cpc_writeb(&ptdescr->status, 0);
362 }
363 rx_dma_buf_pt_init(card, ch);
364}
365
366static void tx_dma_buf_check(pc300_t * card, int ch)
367{
368 volatile pcsca_bd_t __iomem *ptdescr;
369 int i;
370 ucshort first_bd = card->chan[ch].tx_first_bd;
371 ucshort next_bd = card->chan[ch].tx_next_bd;
372
373 printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch,
374 first_bd, TX_BD_ADDR(ch, first_bd),
375 next_bd, TX_BD_ADDR(ch, next_bd));
376 for (i = first_bd,
377 ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, first_bd));
378 i != ((next_bd + 1) & (N_DMA_TX_BUF - 1));
379 i = (i + 1) & (N_DMA_TX_BUF - 1),
380 ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, i))) {
381 printk("\n CH%d TX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
382 ch, i, cpc_readl(&ptdescr->next),
383 cpc_readl(&ptdescr->ptbuf),
384 cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len));
385 }
386 printk("\n");
387}
388
389#ifdef PC300_DEBUG_OTHER
390/* Show all TX buffer descriptors */
391static void tx1_dma_buf_check(pc300_t * card, int ch)
392{
393 volatile pcsca_bd_t __iomem *ptdescr;
394 int i;
395 ucshort first_bd = card->chan[ch].tx_first_bd;
396 ucshort next_bd = card->chan[ch].tx_next_bd;
397 uclong scabase = card->hw.scabase;
398
399 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd);
400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
401 first_bd, TX_BD_ADDR(ch, first_bd),
402 next_bd, TX_BD_ADDR(ch, next_bd));
403 printk("TX_CDA=0x%08x, TX_EDA=0x%08x\n",
404 cpc_readl(scabase + DTX_REG(CDAL, ch)),
405 cpc_readl(scabase + DTX_REG(EDAL, ch)));
406 for (i = 0; i < N_DMA_TX_BUF; i++) {
407 ptdescr = (card->hw.rambase + TX_BD_ADDR(ch, i));
408 printk("\n CH%d TX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
409 ch, i, cpc_readl(&ptdescr->next),
410 cpc_readl(&ptdescr->ptbuf),
411 cpc_readb(&ptdescr->status), cpc_readw(&ptdescr->len));
412 }
413 printk("\n");
414}
415#endif
416
417static void rx_dma_buf_check(pc300_t * card, int ch)
418{
419 volatile pcsca_bd_t __iomem *ptdescr;
420 int i;
421 ucshort first_bd = card->chan[ch].rx_first_bd;
422 ucshort last_bd = card->chan[ch].rx_last_bd;
423 int ch_factor;
424
425 ch_factor = ch * N_DMA_RX_BUF;
426 printk("#CH%d: f_bd = %d, l_bd = %d\n", ch, first_bd, last_bd);
427 for (i = 0, ptdescr = (card->hw.rambase +
428 DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t));
429 i < N_DMA_RX_BUF; i++, ptdescr++) {
430 if (cpc_readb(&ptdescr->status) & DST_OSB)
431 printk ("\n CH%d RX%d: next=0x%x, ptbuf=0x%x, ST=0x%x, len=%d",
432 ch, i, cpc_readl(&ptdescr->next),
433 cpc_readl(&ptdescr->ptbuf),
434 cpc_readb(&ptdescr->status),
435 cpc_readw(&ptdescr->len));
436 }
437 printk("\n");
438}
439
440int dma_get_rx_frame_size(pc300_t * card, int ch)
441{
442 volatile pcsca_bd_t __iomem *ptdescr;
443 ucshort first_bd = card->chan[ch].rx_first_bd;
444 int rcvd = 0;
445 volatile ucchar status;
446
447 ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd));
448 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
449 rcvd += cpc_readw(&ptdescr->len);
450 first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1);
451 if ((status & DST_EOM) || (first_bd == card->chan[ch].rx_last_bd)) {
452 /* Return the size of a good frame or incomplete bad frame
453 * (dma_buf_read will clean the buffer descriptors in this case). */
454 return (rcvd);
455 }
456 ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
457 }
458 return (-1);
459}
460
461/*
462 * dma_buf_write: writes a frame to the Tx DMA buffers
463 * NOTE: this function writes one frame at a time.
464 */
465int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
466{
467 int i, nchar;
468 volatile pcsca_bd_t __iomem *ptdescr;
469 int tosend = len;
470 ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1;
471
472 if (nbuf >= card->chan[ch].nfree_tx_bd) {
473 return -ENOMEM;
474 }
475
476 for (i = 0; i < nbuf; i++) {
477 ptdescr = (card->hw.rambase +
478 TX_BD_ADDR(ch, card->chan[ch].tx_next_bd));
479 nchar = cpc_min(BD_DEF_LEN, tosend);
480 if (cpc_readb(&ptdescr->status) & DST_OSB) {
481 memcpy_toio((card->hw.rambase + cpc_readl(&ptdescr->ptbuf)),
482 &ptdata[len - tosend], nchar);
483 cpc_writew(&ptdescr->len, nchar);
484 card->chan[ch].nfree_tx_bd--;
485 if ((i + 1) == nbuf) {
486 /* This must be the last BD to be used */
487 cpc_writeb(&ptdescr->status, DST_EOM);
488 } else {
489 cpc_writeb(&ptdescr->status, 0);
490 }
491 } else {
492 return -ENOMEM;
493 }
494 tosend -= nchar;
495 card->chan[ch].tx_next_bd =
496 (card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1);
497 }
498 /* If it gets to here, it means we have sent the whole frame */
499 return 0;
500}
501
502/*
503 * dma_buf_read: reads a frame from the Rx DMA buffers
504 * NOTE: this function reads one frame at a time.
505 */
506int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
507{
508 int nchar;
509 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
510 volatile pcsca_bd_t __iomem *ptdescr;
511 int rcvd = 0;
512 volatile ucchar status;
513
514 ptdescr = (card->hw.rambase +
515 RX_BD_ADDR(ch, chan->rx_first_bd));
516 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
517 nchar = cpc_readw(&ptdescr->len);
518 if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT))
519 || (nchar > BD_DEF_LEN)) {
520
521 if (nchar > BD_DEF_LEN)
522 status |= DST_RBIT;
523 rcvd = -status;
524 /* Discard remaining descriptors used by the bad frame */
525 while (chan->rx_first_bd != chan->rx_last_bd) {
526 cpc_writeb(&ptdescr->status, 0);
527 chan->rx_first_bd = (chan->rx_first_bd+1) & (N_DMA_RX_BUF-1);
528 if (status & DST_EOM)
529 break;
530 ptdescr = (card->hw.rambase +
531 cpc_readl(&ptdescr->next));
532 status = cpc_readb(&ptdescr->status);
533 }
534 break;
535 }
536 if (nchar != 0) {
537 if (skb) {
538 memcpy_fromio(skb_put(skb, nchar),
539 (card->hw.rambase+cpc_readl(&ptdescr->ptbuf)),nchar);
540 }
541 rcvd += nchar;
542 }
543 cpc_writeb(&ptdescr->status, 0);
544 cpc_writeb(&ptdescr->len, 0);
545 chan->rx_first_bd = (chan->rx_first_bd + 1) & (N_DMA_RX_BUF - 1);
546
547 if (status & DST_EOM)
548 break;
549
550 ptdescr = (card->hw.rambase + cpc_readl(&ptdescr->next));
551 }
552
553 if (rcvd != 0) {
554 /* Update pointer */
555 chan->rx_last_bd = (chan->rx_first_bd - 1) & (N_DMA_RX_BUF - 1);
556 /* Update EDA */
557 cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
558 RX_BD_ADDR(ch, chan->rx_last_bd));
559 }
560 return (rcvd);
561}
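/*
 * Usage sketch (illustration only; the authoritative receive path is
 * cpc_net_rx(), further down in this file): the caller first asks how
 * large the pending frame is, sizes an sk_buff accordingly and then
 * drains the descriptors:
 *
 *	rxsize = dma_get_rx_frame_size(card, ch);
 *	if (rxsize > 0 && (skb = dev_alloc_skb(rxsize)) != NULL)
 *		dma_buf_read(card, ch, skb);
 *
 * A negative return from dma_buf_read() carries the (negated) status
 * bits of a bad frame, whose descriptors have already been cleaned up.
 */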
562
563void tx_dma_stop(pc300_t * card, int ch)
564{
565 void __iomem *scabase = card->hw.scabase;
566 ucchar drr_ena_bit = 1 << (5 + 2 * ch);
567 ucchar drr_rst_bit = 1 << (1 + 2 * ch);
568
569 /* Disable DMA */
570 cpc_writeb(scabase + DRR, drr_ena_bit);
571 cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
572}
573
574void rx_dma_stop(pc300_t * card, int ch)
575{
576 void __iomem *scabase = card->hw.scabase;
577 ucchar drr_ena_bit = 1 << (4 + 2 * ch);
578 ucchar drr_rst_bit = 1 << (2 * ch);
579
580 /* Disable DMA */
581 cpc_writeb(scabase + DRR, drr_ena_bit);
582 cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
583}
584
585void rx_dma_start(pc300_t * card, int ch)
586{
587 void __iomem *scabase = card->hw.scabase;
588 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
589
590 /* Start DMA */
591 cpc_writel(scabase + DRX_REG(CDAL, ch),
592 RX_BD_ADDR(ch, chan->rx_first_bd));
593 if (cpc_readl(scabase + DRX_REG(CDAL,ch)) !=
594 RX_BD_ADDR(ch, chan->rx_first_bd)) {
595 cpc_writel(scabase + DRX_REG(CDAL, ch),
596 RX_BD_ADDR(ch, chan->rx_first_bd));
597 }
598 cpc_writel(scabase + DRX_REG(EDAL, ch),
599 RX_BD_ADDR(ch, chan->rx_last_bd));
600 cpc_writew(scabase + DRX_REG(BFLL, ch), BD_DEF_LEN);
601 cpc_writeb(scabase + DSR_RX(ch), DSR_DE);
602 if (!(cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
603 cpc_writeb(scabase + DSR_RX(ch), DSR_DE);
604 }
605}
606
607/*************************/
608/*** FALC Routines ***/
609/*************************/
610void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
611{
612 void __iomem *falcbase = card->hw.falcbase;
613 unsigned long i = 0;
614
615 while (cpc_readb(falcbase + F_REG(SIS, ch)) & SIS_CEC) {
616 if (i++ >= PC300_FALC_MAXLOOP) {
617 printk("%s: FALC command locked(cmd=0x%x).\n",
618 card->chan[ch].d.name, cmd);
619 break;
620 }
621 }
622 cpc_writeb(falcbase + F_REG(CMDR, ch), cmd);
623}
624
625void falc_intr_enable(pc300_t * card, int ch)
626{
627 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
628 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
629 falc_t *pfalc = (falc_t *) & chan->falc;
630 void __iomem *falcbase = card->hw.falcbase;
631
632 /* Interrupt pins are open-drain */
633 cpc_writeb(falcbase + F_REG(IPC, ch),
634 cpc_readb(falcbase + F_REG(IPC, ch)) & ~IPC_IC0);
635 /* Counters updated each second */
636 cpc_writeb(falcbase + F_REG(FMR1, ch),
637 cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_ECM);
638 /* Enable SEC and ES interrupts */
639 cpc_writeb(falcbase + F_REG(IMR3, ch),
640 cpc_readb(falcbase + F_REG(IMR3, ch)) & ~(IMR3_SEC | IMR3_ES));
641 if (conf->fr_mode == PC300_FR_UNFRAMED) {
642 cpc_writeb(falcbase + F_REG(IMR4, ch),
643 cpc_readb(falcbase + F_REG(IMR4, ch)) & ~(IMR4_LOS));
644 } else {
645 cpc_writeb(falcbase + F_REG(IMR4, ch),
646 cpc_readb(falcbase + F_REG(IMR4, ch)) &
647 ~(IMR4_LFA | IMR4_AIS | IMR4_LOS | IMR4_SLIP));
648 }
649 if (conf->media == IF_IFACE_T1) {
650 cpc_writeb(falcbase + F_REG(IMR3, ch),
651 cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC);
652 } else {
653 cpc_writeb(falcbase + F_REG(IPC, ch),
654 cpc_readb(falcbase + F_REG(IPC, ch)) | IPC_SCI);
655 if (conf->fr_mode == PC300_FR_UNFRAMED) {
656 cpc_writeb(falcbase + F_REG(IMR2, ch),
657 cpc_readb(falcbase + F_REG(IMR2, ch)) & ~(IMR2_LOS));
658 } else {
659 cpc_writeb(falcbase + F_REG(IMR2, ch),
660 cpc_readb(falcbase + F_REG(IMR2, ch)) &
661 ~(IMR2_FAR | IMR2_LFA | IMR2_AIS | IMR2_LOS));
662 if (pfalc->multiframe_mode) {
663 cpc_writeb(falcbase + F_REG(IMR2, ch),
664 cpc_readb(falcbase + F_REG(IMR2, ch)) &
665 ~(IMR2_T400MS | IMR2_MFAR));
666 } else {
667 cpc_writeb(falcbase + F_REG(IMR2, ch),
668 cpc_readb(falcbase + F_REG(IMR2, ch)) |
669 IMR2_T400MS | IMR2_MFAR);
670 }
671 }
672 }
673}
674
675void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
676{
677 void __iomem *falcbase = card->hw.falcbase;
678 ucchar tshf = card->chan[ch].falc.offset;
679
680 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
681 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) &
682 ~(0x80 >> ((timeslot - tshf) & 0x07)));
683 cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch),
684 cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) |
685 (0x80 >> (timeslot & 0x07)));
686 cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch),
687 cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) |
688 (0x80 >> (timeslot & 0x07)));
689}
690
691void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
692{
693 void __iomem *falcbase = card->hw.falcbase;
694 ucchar tshf = card->chan[ch].falc.offset;
695
696 cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch),
697 cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) |
698 (0x80 >> ((timeslot - tshf) & 0x07)));
699 cpc_writeb(falcbase + F_REG((TTR1 + timeslot / 8), ch),
700 cpc_readb(falcbase + F_REG((TTR1 + timeslot / 8), ch)) &
701 ~(0x80 >> (timeslot & 0x07)));
702 cpc_writeb(falcbase + F_REG((RTR1 + timeslot / 8), ch),
703 cpc_readb(falcbase + F_REG((RTR1 + timeslot / 8), ch)) &
704 ~(0x80 >> (timeslot & 0x07)));
705}
706
707void falc_close_all_timeslots(pc300_t * card, int ch)
708{
709 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
710 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
711 void __iomem *falcbase = card->hw.falcbase;
712
713 cpc_writeb(falcbase + F_REG(ICB1, ch), 0xff);
714 cpc_writeb(falcbase + F_REG(TTR1, ch), 0);
715 cpc_writeb(falcbase + F_REG(RTR1, ch), 0);
716 cpc_writeb(falcbase + F_REG(ICB2, ch), 0xff);
717 cpc_writeb(falcbase + F_REG(TTR2, ch), 0);
718 cpc_writeb(falcbase + F_REG(RTR2, ch), 0);
719 cpc_writeb(falcbase + F_REG(ICB3, ch), 0xff);
720 cpc_writeb(falcbase + F_REG(TTR3, ch), 0);
721 cpc_writeb(falcbase + F_REG(RTR3, ch), 0);
722 if (conf->media == IF_IFACE_E1) {
723 cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff);
724 cpc_writeb(falcbase + F_REG(TTR4, ch), 0);
725 cpc_writeb(falcbase + F_REG(RTR4, ch), 0);
726 }
727}
728
729void falc_open_all_timeslots(pc300_t * card, int ch)
730{
731 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
732 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
733 void __iomem *falcbase = card->hw.falcbase;
734
735 cpc_writeb(falcbase + F_REG(ICB1, ch), 0);
736 if (conf->fr_mode == PC300_FR_UNFRAMED) {
737 cpc_writeb(falcbase + F_REG(TTR1, ch), 0xff);
738 cpc_writeb(falcbase + F_REG(RTR1, ch), 0xff);
739 } else {
740 /* Timeslot 0 is never enabled */
741 cpc_writeb(falcbase + F_REG(TTR1, ch), 0x7f);
742 cpc_writeb(falcbase + F_REG(RTR1, ch), 0x7f);
743 }
744 cpc_writeb(falcbase + F_REG(ICB2, ch), 0);
745 cpc_writeb(falcbase + F_REG(TTR2, ch), 0xff);
746 cpc_writeb(falcbase + F_REG(RTR2, ch), 0xff);
747 cpc_writeb(falcbase + F_REG(ICB3, ch), 0);
748 cpc_writeb(falcbase + F_REG(TTR3, ch), 0xff);
749 cpc_writeb(falcbase + F_REG(RTR3, ch), 0xff);
750 if (conf->media == IF_IFACE_E1) {
751 cpc_writeb(falcbase + F_REG(ICB4, ch), 0);
752 cpc_writeb(falcbase + F_REG(TTR4, ch), 0xff);
753 cpc_writeb(falcbase + F_REG(RTR4, ch), 0xff);
754 } else {
755 cpc_writeb(falcbase + F_REG(ICB4, ch), 0xff);
756 cpc_writeb(falcbase + F_REG(TTR4, ch), 0x80);
757 cpc_writeb(falcbase + F_REG(RTR4, ch), 0x80);
758 }
759}
760
761void falc_init_timeslot(pc300_t * card, int ch)
762{
763 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
764 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
765 falc_t *pfalc = (falc_t *) & chan->falc;
766 int tslot;
767
768 for (tslot = 0; tslot < pfalc->num_channels; tslot++) {
769 if (conf->tslot_bitmap & (1 << tslot)) {
770 // Channel enabled
771 falc_open_timeslot(card, ch, tslot + 1);
772 } else {
773 // Channel disabled
774 falc_close_timeslot(card, ch, tslot + 1);
775 }
776 }
777}
778
779void falc_enable_comm(pc300_t * card, int ch)
780{
781 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
782 falc_t *pfalc = (falc_t *) & chan->falc;
783
784 if (pfalc->full_bandwidth) {
785 falc_open_all_timeslots(card, ch);
786 } else {
787 falc_init_timeslot(card, ch);
788 }
789 // CTS/DCD ON
790 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
791 cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
792 ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
793}
794
795void falc_disable_comm(pc300_t * card, int ch)
796{
797 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
798 falc_t *pfalc = (falc_t *) & chan->falc;
799
800 if (pfalc->loop_active != 2) {
801 falc_close_all_timeslots(card, ch);
802 }
803 // CTS/DCD OFF
804 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
805 cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
806 ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
807}
808
809void falc_init_t1(pc300_t * card, int ch)
810{
811 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
812 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
813 falc_t *pfalc = (falc_t *) & chan->falc;
814 void __iomem *falcbase = card->hw.falcbase;
815 ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
816
817 /* Switch to T1 mode (PCM 24) */
818 cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD);
819
820 /* Wait 20 us for setup */
821 udelay(20);
822
823 /* Transmit Buffer Size (1 frame) */
824 cpc_writeb(falcbase + F_REG(SIC1, ch), SIC1_XBS0);
825
826 /* Clock mode */
827 if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */
828 cpc_writeb(falcbase + F_REG(LIM0, ch),
829 cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS);
830 } else { /* Slave mode */
831 cpc_writeb(falcbase + F_REG(LIM0, ch),
832 cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS);
833 cpc_writeb(falcbase + F_REG(LOOP, ch),
834 cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_RTM);
835 }
836
837 cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI);
838 cpc_writeb(falcbase + F_REG(FMR0, ch),
839 cpc_readb(falcbase + F_REG(FMR0, ch)) &
840 ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1));
841
842 switch (conf->lcode) {
843 case PC300_LC_AMI:
844 cpc_writeb(falcbase + F_REG(FMR0, ch),
845 cpc_readb(falcbase + F_REG(FMR0, ch)) |
846 FMR0_XC1 | FMR0_RC1);
847 /* Clear Channel register to ON for all channels */
848 cpc_writeb(falcbase + F_REG(CCB1, ch), 0xff);
849 cpc_writeb(falcbase + F_REG(CCB2, ch), 0xff);
850 cpc_writeb(falcbase + F_REG(CCB3, ch), 0xff);
851 break;
852
853 case PC300_LC_B8ZS:
854 cpc_writeb(falcbase + F_REG(FMR0, ch),
855 cpc_readb(falcbase + F_REG(FMR0, ch)) |
856 FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1);
857 break;
858
859 case PC300_LC_NRZ:
860 cpc_writeb(falcbase + F_REG(FMR0, ch),
861 cpc_readb(falcbase + F_REG(FMR0, ch)) | 0x00);
862 break;
863 }
864
865 cpc_writeb(falcbase + F_REG(LIM0, ch),
866 cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_ELOS);
867 cpc_writeb(falcbase + F_REG(LIM0, ch),
868 cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0));
869 /* Set interface mode to 2 MBPS */
870 cpc_writeb(falcbase + F_REG(FMR1, ch),
871 cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD);
872
873 switch (conf->fr_mode) {
874 case PC300_FR_ESF:
875 pfalc->multiframe_mode = 0;
876 cpc_writeb(falcbase + F_REG(FMR4, ch),
877 cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_FM1);
878 cpc_writeb(falcbase + F_REG(FMR1, ch),
879 cpc_readb(falcbase + F_REG(FMR1, ch)) |
880 FMR1_CRC | FMR1_EDL);
881 cpc_writeb(falcbase + F_REG(XDL1, ch), 0);
882 cpc_writeb(falcbase + F_REG(XDL2, ch), 0);
883 cpc_writeb(falcbase + F_REG(XDL3, ch), 0);
884 cpc_writeb(falcbase + F_REG(FMR0, ch),
885 cpc_readb(falcbase + F_REG(FMR0, ch)) & ~FMR0_SRAF);
886 cpc_writeb(falcbase + F_REG(FMR2, ch),
887 cpc_readb(falcbase + F_REG(FMR2,ch)) | FMR2_MCSP | FMR2_SSP);
888 break;
889
890 case PC300_FR_D4:
891 pfalc->multiframe_mode = 1;
892 cpc_writeb(falcbase + F_REG(FMR4, ch),
893 cpc_readb(falcbase + F_REG(FMR4, ch)) &
894 ~(FMR4_FM1 | FMR4_FM0));
895 cpc_writeb(falcbase + F_REG(FMR0, ch),
896 cpc_readb(falcbase + F_REG(FMR0, ch)) | FMR0_SRAF);
897 cpc_writeb(falcbase + F_REG(FMR2, ch),
898 cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_SSP);
899 break;
900 }
901
902 /* Enable Automatic Resynchronization */
903 cpc_writeb(falcbase + F_REG(FMR4, ch),
904 cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_AUTO);
905
906 /* Transmit Automatic Remote Alarm */
907 cpc_writeb(falcbase + F_REG(FMR2, ch),
908 cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
909
910 /* Channel translation mode 1 : one to one */
911 cpc_writeb(falcbase + F_REG(FMR1, ch),
912 cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_CTM);
913
914 /* No signaling */
915 cpc_writeb(falcbase + F_REG(FMR1, ch),
916 cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_SIGM);
917 cpc_writeb(falcbase + F_REG(FMR5, ch),
918 cpc_readb(falcbase + F_REG(FMR5, ch)) &
919 ~(FMR5_EIBR | FMR5_SRS));
920 cpc_writeb(falcbase + F_REG(CCR1, ch), 0);
921
922 cpc_writeb(falcbase + F_REG(LIM1, ch),
923 cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1);
924
925 switch (conf->lbo) {
926 /* Provides proper Line Build Out */
927 case PC300_LBO_0_DB:
928 cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja));
929 cpc_writeb(falcbase + F_REG(XPM0, ch), 0x5a);
930 cpc_writeb(falcbase + F_REG(XPM1, ch), 0x8f);
931 cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
932 break;
933 case PC300_LBO_7_5_DB:
934 cpc_writeb(falcbase + F_REG(LIM2, ch), (0x40 | LIM2_LOS1 | dja));
935 cpc_writeb(falcbase + F_REG(XPM0, ch), 0x11);
936 cpc_writeb(falcbase + F_REG(XPM1, ch), 0x02);
937 cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
938 break;
939 case PC300_LBO_15_DB:
940 cpc_writeb(falcbase + F_REG(LIM2, ch), (0x80 | LIM2_LOS1 | dja));
941 cpc_writeb(falcbase + F_REG(XPM0, ch), 0x8e);
942 cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01);
943 cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
944 break;
945 case PC300_LBO_22_5_DB:
946 cpc_writeb(falcbase + F_REG(LIM2, ch), (0xc0 | LIM2_LOS1 | dja));
947 cpc_writeb(falcbase + F_REG(XPM0, ch), 0x09);
948 cpc_writeb(falcbase + F_REG(XPM1, ch), 0x01);
949 cpc_writeb(falcbase + F_REG(XPM2, ch), 0x20);
950 break;
951 }
952
953 /* Transmit Clock-Slot Offset */
954 cpc_writeb(falcbase + F_REG(XC0, ch),
955 cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01);
956 /* Transmit Time-slot Offset */
957 cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e);
958 /* Receive Clock-Slot offset */
959 cpc_writeb(falcbase + F_REG(RC0, ch), 0x05);
960 /* Receive Time-slot offset */
961 cpc_writeb(falcbase + F_REG(RC1, ch), 0x00);
962
963 /* LOS Detection after 176 consecutive 0s */
964 cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a);
965 /* LOS Recovery after 22 ones in the time window of PCD */
966 cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15);
967
968 cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f);
969
970 if (conf->fr_mode == PC300_FR_ESF_JAPAN) {
971 cpc_writeb(falcbase + F_REG(RC1, ch),
972 cpc_readb(falcbase + F_REG(RC1, ch)) | 0x80);
973 }
974
975 falc_close_all_timeslots(card, ch);
976}
977
978void falc_init_e1(pc300_t * card, int ch)
979{
980 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
981 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
982 falc_t *pfalc = (falc_t *) & chan->falc;
983 void __iomem *falcbase = card->hw.falcbase;
984 ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0);
985
986 /* Switch to E1 mode (PCM 30) */
987 cpc_writeb(falcbase + F_REG(FMR1, ch),
988 cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_PMOD);
989
990 /* Clock mode */
991 if (conf->phys_settings.clock_type == CLOCK_INT) { /* Master mode */
992 cpc_writeb(falcbase + F_REG(LIM0, ch),
993 cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_MAS);
994 } else { /* Slave mode */
995 cpc_writeb(falcbase + F_REG(LIM0, ch),
996 cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_MAS);
997 }
998 cpc_writeb(falcbase + F_REG(LOOP, ch),
999 cpc_readb(falcbase + F_REG(LOOP, ch)) & ~LOOP_SFM);
1000
1001 cpc_writeb(falcbase + F_REG(IPC, ch), IPC_SCI);
1002 cpc_writeb(falcbase + F_REG(FMR0, ch),
1003 cpc_readb(falcbase + F_REG(FMR0, ch)) &
1004 ~(FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1));
1005
1006 switch (conf->lcode) {
1007 case PC300_LC_AMI:
1008 cpc_writeb(falcbase + F_REG(FMR0, ch),
1009 cpc_readb(falcbase + F_REG(FMR0, ch)) |
1010 FMR0_XC1 | FMR0_RC1);
1011 break;
1012
1013 case PC300_LC_HDB3:
1014 cpc_writeb(falcbase + F_REG(FMR0, ch),
1015 cpc_readb(falcbase + F_REG(FMR0, ch)) |
1016 FMR0_XC0 | FMR0_XC1 | FMR0_RC0 | FMR0_RC1);
1017 break;
1018
1019 case PC300_LC_NRZ:
1020 break;
1021 }
1022
1023 cpc_writeb(falcbase + F_REG(LIM0, ch),
1024 cpc_readb(falcbase + F_REG(LIM0, ch)) & ~(LIM0_SCL1 | LIM0_SCL0));
1025 /* Set interface mode to 2 MBPS */
1026 cpc_writeb(falcbase + F_REG(FMR1, ch),
1027 cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_IMOD);
1028
1029 cpc_writeb(falcbase + F_REG(XPM0, ch), 0x18);
1030 cpc_writeb(falcbase + F_REG(XPM1, ch), 0x03);
1031 cpc_writeb(falcbase + F_REG(XPM2, ch), 0x00);
1032
1033 switch (conf->fr_mode) {
1034 case PC300_FR_MF_CRC4:
1035 pfalc->multiframe_mode = 1;
1036 cpc_writeb(falcbase + F_REG(FMR1, ch),
1037 cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_XFS);
1038 cpc_writeb(falcbase + F_REG(FMR2, ch),
1039 cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_RFS1);
1040 cpc_writeb(falcbase + F_REG(FMR2, ch),
1041 cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_RFS0);
1042 cpc_writeb(falcbase + F_REG(FMR3, ch),
1043 cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_EXTIW);
1044
1045 /* MultiFrame Resynchronization */
1046 cpc_writeb(falcbase + F_REG(FMR1, ch),
1047 cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_MFCS);
1048
1049 /* Automatic Loss of Multiframe > 914 CRC errors */
1050 cpc_writeb(falcbase + F_REG(FMR2, ch),
1051 cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_ALMF);
1052
1053 /* S1 and SI1/SI2 spare Bits set to 1 */
1054 cpc_writeb(falcbase + F_REG(XSP, ch),
1055 cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_AXS);
1056 cpc_writeb(falcbase + F_REG(XSP, ch),
1057 cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_EBP);
1058 cpc_writeb(falcbase + F_REG(XSP, ch),
1059 cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XS13 | XSP_XS15);
1060
1061 /* Automatic Force Resynchronization */
1062 cpc_writeb(falcbase + F_REG(FMR1, ch),
1063 cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR);
1064
1065 /* Transmit Automatic Remote Alarm */
1066 cpc_writeb(falcbase + F_REG(FMR2, ch),
1067 cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
1068
1069 /* Transmit Spare Bits for National Use (Y, Sn, Sa) */
1070 cpc_writeb(falcbase + F_REG(XSW, ch),
1071 cpc_readb(falcbase + F_REG(XSW, ch)) |
1072 XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4);
1073 break;
1074
1075 case PC300_FR_MF_NON_CRC4:
1076 case PC300_FR_D4:
1077 pfalc->multiframe_mode = 0;
1078 cpc_writeb(falcbase + F_REG(FMR1, ch),
1079 cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS);
1080 cpc_writeb(falcbase + F_REG(FMR2, ch),
1081 cpc_readb(falcbase + F_REG(FMR2, ch)) &
1082 ~(FMR2_RFS1 | FMR2_RFS0));
1083 cpc_writeb(falcbase + F_REG(XSW, ch),
1084 cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XSIS);
1085 cpc_writeb(falcbase + F_REG(XSP, ch),
1086 cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_XSIF);
1087
1088 /* Automatic Force Resynchronization */
1089 cpc_writeb(falcbase + F_REG(FMR1, ch),
1090 cpc_readb(falcbase + F_REG(FMR1, ch)) | FMR1_AFR);
1091
1092 /* Transmit Automatic Remote Alarm */
1093 cpc_writeb(falcbase + F_REG(FMR2, ch),
1094 cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_AXRA);
1095
1096 /* Transmit Spare Bits for National Use (Y, Sn, Sa) */
1097 cpc_writeb(falcbase + F_REG(XSW, ch),
1098 cpc_readb(falcbase + F_REG(XSW, ch)) |
1099 XSW_XY0 | XSW_XY1 | XSW_XY2 | XSW_XY3 | XSW_XY4);
1100 break;
1101
1102 case PC300_FR_UNFRAMED:
1103 pfalc->multiframe_mode = 0;
1104 cpc_writeb(falcbase + F_REG(FMR1, ch),
1105 cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_XFS);
1106 cpc_writeb(falcbase + F_REG(FMR2, ch),
1107 cpc_readb(falcbase + F_REG(FMR2, ch)) &
1108 ~(FMR2_RFS1 | FMR2_RFS0));
1109 cpc_writeb(falcbase + F_REG(XSP, ch),
1110 cpc_readb(falcbase + F_REG(XSP, ch)) | XSP_TT0);
1111 cpc_writeb(falcbase + F_REG(XSW, ch),
1112 cpc_readb(falcbase + F_REG(XSW, ch)) &
1113 ~(XSW_XTM|XSW_XY0|XSW_XY1|XSW_XY2|XSW_XY3|XSW_XY4));
1114 cpc_writeb(falcbase + F_REG(TSWM, ch), 0xff);
1115 cpc_writeb(falcbase + F_REG(FMR2, ch),
1116 cpc_readb(falcbase + F_REG(FMR2, ch)) |
1117 (FMR2_RTM | FMR2_DAIS));
1118 cpc_writeb(falcbase + F_REG(FMR2, ch),
1119 cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_AXRA);
1120 cpc_writeb(falcbase + F_REG(FMR1, ch),
1121 cpc_readb(falcbase + F_REG(FMR1, ch)) & ~FMR1_AFR);
1122 pfalc->sync = 1;
1123 cpc_writeb(falcbase + card->hw.cpld_reg2,
1124 cpc_readb(falcbase + card->hw.cpld_reg2) |
1125 (CPLD_REG2_FALC_LED2 << (2 * ch)));
1126 break;
1127 }
1128
1129 /* No signaling */
1130 cpc_writeb(falcbase + F_REG(XSP, ch),
1131 cpc_readb(falcbase + F_REG(XSP, ch)) & ~XSP_CASEN);
1132 cpc_writeb(falcbase + F_REG(CCR1, ch), 0);
1133
1134 cpc_writeb(falcbase + F_REG(LIM1, ch),
1135 cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RIL0 | LIM1_RIL1);
1136 cpc_writeb(falcbase + F_REG(LIM2, ch), (LIM2_LOS1 | dja));
1137
1138 /* Transmit Clock-Slot Offset */
1139 cpc_writeb(falcbase + F_REG(XC0, ch),
1140 cpc_readb(falcbase + F_REG(XC0, ch)) | 0x01);
1141 /* Transmit Time-slot Offset */
1142 cpc_writeb(falcbase + F_REG(XC1, ch), 0x3e);
1143 /* Receive Clock-Slot offset */
1144 cpc_writeb(falcbase + F_REG(RC0, ch), 0x05);
1145 /* Receive Time-slot offset */
1146 cpc_writeb(falcbase + F_REG(RC1, ch), 0x00);
1147
1148 /* LOS Detection after 176 consecutive 0s */
1149 cpc_writeb(falcbase + F_REG(PCDR, ch), 0x0a);
1150 /* LOS Recovery after 22 ones in the time window of PCD */
1151 cpc_writeb(falcbase + F_REG(PCRR, ch), 0x15);
1152
1153 cpc_writeb(falcbase + F_REG(IDLE, ch), 0x7f);
1154
1155 falc_close_all_timeslots(card, ch);
1156}
1157
1158void falc_init_hdlc(pc300_t * card, int ch)
1159{
1160 void __iomem *falcbase = card->hw.falcbase;
1161 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1162 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1163
1164 /* Enable transparent data transfer */
1165 if (conf->fr_mode == PC300_FR_UNFRAMED) {
1166 cpc_writeb(falcbase + F_REG(MODE, ch), 0);
1167 } else {
1168 cpc_writeb(falcbase + F_REG(MODE, ch),
1169 cpc_readb(falcbase + F_REG(MODE, ch)) |
1170 (MODE_HRAC | MODE_MDS2));
1171 cpc_writeb(falcbase + F_REG(RAH2, ch), 0xff);
1172 cpc_writeb(falcbase + F_REG(RAH1, ch), 0xff);
1173 cpc_writeb(falcbase + F_REG(RAL2, ch), 0xff);
1174 cpc_writeb(falcbase + F_REG(RAL1, ch), 0xff);
1175 }
1176
1177 /* Tx/Rx reset */
1178 falc_issue_cmd(card, ch, CMDR_RRES | CMDR_XRES | CMDR_SRES);
1179
1180 /* Enable interrupt sources */
1181 falc_intr_enable(card, ch);
1182}
1183
1184void te_config(pc300_t * card, int ch)
1185{
1186 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1187 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1188 falc_t *pfalc = (falc_t *) & chan->falc;
1189 void __iomem *falcbase = card->hw.falcbase;
1190 ucchar dummy;
1191 unsigned long flags;
1192
1193 memset(pfalc, 0, sizeof(falc_t));
1194 switch (conf->media) {
1195 case IF_IFACE_T1:
1196 pfalc->num_channels = NUM_OF_T1_CHANNELS;
1197 pfalc->offset = 1;
1198 break;
1199 case IF_IFACE_E1:
1200 pfalc->num_channels = NUM_OF_E1_CHANNELS;
1201 pfalc->offset = 0;
1202 break;
1203 }
1204 if (conf->tslot_bitmap == 0xffffffffUL)
1205 pfalc->full_bandwidth = 1;
1206 else
1207 pfalc->full_bandwidth = 0;
1208
1209 CPC_LOCK(card, flags);
1210 /* Reset the FALC chip */
1211 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
1212 cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
1213 (CPLD_REG1_FALC_RESET << (2 * ch)));
1214 udelay(10000);
1215 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
1216 cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
1217 ~(CPLD_REG1_FALC_RESET << (2 * ch)));
1218
1219 if (conf->media == IF_IFACE_T1) {
1220 falc_init_t1(card, ch);
1221 } else {
1222 falc_init_e1(card, ch);
1223 }
1224 falc_init_hdlc(card, ch);
1225 if (conf->rx_sens == PC300_RX_SENS_SH) {
1226 cpc_writeb(falcbase + F_REG(LIM0, ch),
1227 cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_EQON);
1228 } else {
1229 cpc_writeb(falcbase + F_REG(LIM0, ch),
1230 cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_EQON);
1231 }
1232 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
1233 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
1234 ((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK) << (2 * ch)));
1235
1236 /* Clear all interrupt registers */
1237 dummy = cpc_readb(falcbase + F_REG(FISR0, ch)) +
1238 cpc_readb(falcbase + F_REG(FISR1, ch)) +
1239 cpc_readb(falcbase + F_REG(FISR2, ch)) +
1240 cpc_readb(falcbase + F_REG(FISR3, ch));
1241 CPC_UNLOCK(card, flags);
1242}
1243
1244void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
1245{
1246 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1247 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1248 falc_t *pfalc = (falc_t *) & chan->falc;
1249 void __iomem *falcbase = card->hw.falcbase;
1250
1251 /* Verify LOS */
1252 if (frs0 & FRS0_LOS) {
1253 if (!pfalc->red_alarm) {
1254 pfalc->red_alarm = 1;
1255 pfalc->los++;
1256 if (!pfalc->blue_alarm) {
1257 // EVENT_FALC_ABNORMAL
1258 if (conf->media == IF_IFACE_T1) {
1259 /* Disable this interrupt as it may otherwise interfere
1260 * with other working boards. */
1261 cpc_writeb(falcbase + F_REG(IMR0, ch),
1262 cpc_readb(falcbase + F_REG(IMR0, ch))
1263 | IMR0_PDEN);
1264 }
1265 falc_disable_comm(card, ch);
1266 // EVENT_FALC_ABNORMAL
1267 }
1268 }
1269 } else {
1270 if (pfalc->red_alarm) {
1271 pfalc->red_alarm = 0;
1272 pfalc->losr++;
1273 }
1274 }
1275
1276 if (conf->fr_mode != PC300_FR_UNFRAMED) {
1277 /* Verify AIS alarm */
1278 if (frs0 & FRS0_AIS) {
1279 if (!pfalc->blue_alarm) {
1280 pfalc->blue_alarm = 1;
1281 pfalc->ais++;
1282 // EVENT_AIS
1283 if (conf->media == IF_IFACE_T1) {
1284 /* Disable this interrupt as it may otherwise interfere with other working boards. */
1285 cpc_writeb(falcbase + F_REG(IMR0, ch),
1286 cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
1287 }
1288 falc_disable_comm(card, ch);
1289 // EVENT_AIS
1290 }
1291 } else {
1292 pfalc->blue_alarm = 0;
1293 }
1294
1295 /* Verify LFA */
1296 if (frs0 & FRS0_LFA) {
1297 if (!pfalc->loss_fa) {
1298 pfalc->loss_fa = 1;
1299 pfalc->lfa++;
1300 if (!pfalc->blue_alarm && !pfalc->red_alarm) {
1301 // EVENT_FALC_ABNORMAL
1302 if (conf->media == IF_IFACE_T1) {
1303 /* Disable this interrupt as it may otherwise
1304 * interfere with other working boards. */
1305 cpc_writeb(falcbase + F_REG(IMR0, ch),
1306 cpc_readb(falcbase + F_REG(IMR0, ch))
1307 | IMR0_PDEN);
1308 }
1309 falc_disable_comm(card, ch);
1310 // EVENT_FALC_ABNORMAL
1311 }
1312 }
1313 } else {
1314 if (pfalc->loss_fa) {
1315 pfalc->loss_fa = 0;
1316 pfalc->farec++;
1317 }
1318 }
1319
1320 /* Verify LMFA */
1321 if (pfalc->multiframe_mode && (frs0 & FRS0_LMFA)) {
1322 /* D4 or CRC4 frame mode */
1323 if (!pfalc->loss_mfa) {
1324 pfalc->loss_mfa = 1;
1325 pfalc->lmfa++;
1326 if (!pfalc->blue_alarm && !pfalc->red_alarm &&
1327 !pfalc->loss_fa) {
1328 // EVENT_FALC_ABNORMAL
1329 if (conf->media == IF_IFACE_T1) {
1330 /* Disable this interrupt as it may otherwise
1331 * interfere with other working boards. */
1332 cpc_writeb(falcbase + F_REG(IMR0, ch),
1333 cpc_readb(falcbase + F_REG(IMR0, ch))
1334 | IMR0_PDEN);
1335 }
1336 falc_disable_comm(card, ch);
1337 // EVENT_FALC_ABNORMAL
1338 }
1339 }
1340 } else {
1341 pfalc->loss_mfa = 0;
1342 }
1343
1344 /* Verify Remote Alarm */
1345 if (frs0 & FRS0_RRA) {
1346 if (!pfalc->yellow_alarm) {
1347 pfalc->yellow_alarm = 1;
1348 pfalc->rai++;
1349 if (pfalc->sync) {
1350 // EVENT_RAI
1351 falc_disable_comm(card, ch);
1352 // EVENT_RAI
1353 }
1354 }
1355 } else {
1356 pfalc->yellow_alarm = 0;
1357 }
1358 } /* if !PC300_UNFRAMED */
1359
1360 if (pfalc->red_alarm || pfalc->loss_fa ||
1361 pfalc->loss_mfa || pfalc->blue_alarm) {
1362 if (pfalc->sync) {
1363 pfalc->sync = 0;
1364 chan->d.line_off++;
1365 cpc_writeb(falcbase + card->hw.cpld_reg2,
1366 cpc_readb(falcbase + card->hw.cpld_reg2) &
1367 ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
1368 }
1369 } else {
1370 if (!pfalc->sync) {
1371 pfalc->sync = 1;
1372 chan->d.line_on++;
1373 cpc_writeb(falcbase + card->hw.cpld_reg2,
1374 cpc_readb(falcbase + card->hw.cpld_reg2) |
1375 (CPLD_REG2_FALC_LED2 << (2 * ch)));
1376 }
1377 }
1378
1379 if (pfalc->sync && !pfalc->yellow_alarm) {
1380 if (!pfalc->active) {
1381 // EVENT_FALC_NORMAL
1382 if (pfalc->loop_active) {
1383 return;
1384 }
1385 if (conf->media == IF_IFACE_T1) {
1386 cpc_writeb(falcbase + F_REG(IMR0, ch),
1387 cpc_readb(falcbase + F_REG(IMR0, ch)) & ~IMR0_PDEN);
1388 }
1389 falc_enable_comm(card, ch);
1390 // EVENT_FALC_NORMAL
1391 pfalc->active = 1;
1392 }
1393 } else {
1394 if (pfalc->active) {
1395 pfalc->active = 0;
1396 }
1397 }
1398}
1399
1400void falc_update_stats(pc300_t * card, int ch)
1401{
1402 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1403 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1404 falc_t *pfalc = (falc_t *) & chan->falc;
1405 void __iomem *falcbase = card->hw.falcbase;
1406 ucshort counter;
1407
1408 counter = cpc_readb(falcbase + F_REG(FECL, ch));
1409 counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8;
1410 pfalc->fec += counter;
1411
1412 counter = cpc_readb(falcbase + F_REG(CVCL, ch));
1413 counter |= cpc_readb(falcbase + F_REG(CVCH, ch)) << 8;
1414 pfalc->cvc += counter;
1415
1416 counter = cpc_readb(falcbase + F_REG(CECL, ch));
1417 counter |= cpc_readb(falcbase + F_REG(CECH, ch)) << 8;
1418 pfalc->cec += counter;
1419
1420 counter = cpc_readb(falcbase + F_REG(EBCL, ch));
1421 counter |= cpc_readb(falcbase + F_REG(EBCH, ch)) << 8;
1422 pfalc->ebc += counter;
1423
1424 if (cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) {
1425 mdelay(10);
1426 counter = cpc_readb(falcbase + F_REG(BECL, ch));
1427 counter |= cpc_readb(falcbase + F_REG(BECH, ch)) << 8;
1428 pfalc->bec += counter;
1429
1430 if (((conf->media == IF_IFACE_T1) &&
1431 (cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_LLBAD) &&
1432 (!(cpc_readb(falcbase + F_REG(FRS1, ch)) & FRS1_PDEN)))
1433 ||
1434 ((conf->media == IF_IFACE_E1) &&
1435 (cpc_readb(falcbase + F_REG(RSP, ch)) & RSP_LLBAD))) {
1436 pfalc->prbs = 2;
1437 } else {
1438 pfalc->prbs = 1;
1439 }
1440 }
1441}
1442
1443/*----------------------------------------------------------------------------
1444 * falc_remote_loop
1445 *----------------------------------------------------------------------------
1446 * Description: In the remote loopback mode the clock and data recovered
1447 * from the line inputs RL1/2 or RDIP/RDIN are routed back
1448 * to the line outputs XL1/2 or XDOP/XDON via the analog
1449 * transmitter. As in normal mode they are processed by
1450 * the synchronizer and then sent to the system interface.
1451 *----------------------------------------------------------------------------
1452 */
1453void falc_remote_loop(pc300_t * card, int ch, int loop_on)
1454{
1455 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1456 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1457 falc_t *pfalc = (falc_t *) & chan->falc;
1458 void __iomem *falcbase = card->hw.falcbase;
1459
1460 if (loop_on) {
1461 // EVENT_FALC_ABNORMAL
1462 if (conf->media == IF_IFACE_T1) {
1463 /* Disable this interrupt as it may otherwise interfere with
1464 * other working boards. */
1465 cpc_writeb(falcbase + F_REG(IMR0, ch),
1466 cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
1467 }
1468 falc_disable_comm(card, ch);
1469 // EVENT_FALC_ABNORMAL
1470 cpc_writeb(falcbase + F_REG(LIM1, ch),
1471 cpc_readb(falcbase + F_REG(LIM1, ch)) | LIM1_RL);
1472 pfalc->loop_active = 1;
1473 } else {
1474 cpc_writeb(falcbase + F_REG(LIM1, ch),
1475 cpc_readb(falcbase + F_REG(LIM1, ch)) & ~LIM1_RL);
1476 pfalc->sync = 0;
1477 cpc_writeb(falcbase + card->hw.cpld_reg2,
1478 cpc_readb(falcbase + card->hw.cpld_reg2) &
1479 ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
1480 pfalc->active = 0;
1481 falc_issue_cmd(card, ch, CMDR_XRES);
1482 pfalc->loop_active = 0;
1483 }
1484}
1485
1486/*----------------------------------------------------------------------------
1487 * falc_local_loop
1488 *----------------------------------------------------------------------------
1489 * Description: The local loopback mode disconnects the receive lines
1490 * RL1/RL2 or RDIP/RDIN from the receiver. Instead of the
1491 * signals coming from the line, the data provided by the system
1492 * interface are routed through the analog receiver back to
1493 * the system interface. The unipolar bit stream is still
1494 * transmitted undisturbed on the line. Receiver and transmitter
1495 * coding must be identical.
1496 *----------------------------------------------------------------------------
1497 */
1498void falc_local_loop(pc300_t * card, int ch, int loop_on)
1499{
1500 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1501 falc_t *pfalc = (falc_t *) & chan->falc;
1502 void __iomem *falcbase = card->hw.falcbase;
1503
1504 if (loop_on) {
1505 cpc_writeb(falcbase + F_REG(LIM0, ch),
1506 cpc_readb(falcbase + F_REG(LIM0, ch)) | LIM0_LL);
1507 pfalc->loop_active = 1;
1508 } else {
1509 cpc_writeb(falcbase + F_REG(LIM0, ch),
1510 cpc_readb(falcbase + F_REG(LIM0, ch)) & ~LIM0_LL);
1511 pfalc->loop_active = 0;
1512 }
1513}
1514
1515/*----------------------------------------------------------------------------
1516 * falc_payload_loop
1517 *----------------------------------------------------------------------------
1518 * Description: This routine enables/disables the payload loopback.
1519 * When the payload loop is activated, the received 192 bits
1520 * of payload data will be looped back to the transmit
1521 * direction. The framing bits, CRC6 and DL bits are not
1522 * looped. They are originated by the FALC-LH transmitter.
1523 *----------------------------------------------------------------------------
1524 */
1525void falc_payload_loop(pc300_t * card, int ch, int loop_on)
1526{
1527 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1528 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1529 falc_t *pfalc = (falc_t *) & chan->falc;
1530 void __iomem *falcbase = card->hw.falcbase;
1531
1532 if (loop_on) {
1533 // EVENT_FALC_ABNORMAL
1534 if (conf->media == IF_IFACE_T1) {
1535 /* Disable this interrupt as it may otherwise interfere with
1536 * other working boards. */
1537 cpc_writeb(falcbase + F_REG(IMR0, ch),
1538 cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
1539 }
1540 falc_disable_comm(card, ch);
1541 // EVENT_FALC_ABNORMAL
1542 cpc_writeb(falcbase + F_REG(FMR2, ch),
1543 cpc_readb(falcbase + F_REG(FMR2, ch)) | FMR2_PLB);
1544 if (conf->media == IF_IFACE_T1) {
1545 cpc_writeb(falcbase + F_REG(FMR4, ch),
1546 cpc_readb(falcbase + F_REG(FMR4, ch)) | FMR4_TM);
1547 } else {
1548 cpc_writeb(falcbase + F_REG(FMR5, ch),
1549 cpc_readb(falcbase + F_REG(FMR5, ch)) | XSP_TT0);
1550 }
1551 falc_open_all_timeslots(card, ch);
1552 pfalc->loop_active = 2;
1553 } else {
1554 cpc_writeb(falcbase + F_REG(FMR2, ch),
1555 cpc_readb(falcbase + F_REG(FMR2, ch)) & ~FMR2_PLB);
1556 if (conf->media == IF_IFACE_T1) {
1557 cpc_writeb(falcbase + F_REG(FMR4, ch),
1558 cpc_readb(falcbase + F_REG(FMR4, ch)) & ~FMR4_TM);
1559 } else {
1560 cpc_writeb(falcbase + F_REG(FMR5, ch),
1561 cpc_readb(falcbase + F_REG(FMR5, ch)) & ~XSP_TT0);
1562 }
1563 pfalc->sync = 0;
1564 cpc_writeb(falcbase + card->hw.cpld_reg2,
1565 cpc_readb(falcbase + card->hw.cpld_reg2) &
1566 ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
1567 pfalc->active = 0;
1568 falc_issue_cmd(card, ch, CMDR_XRES);
1569 pfalc->loop_active = 0;
1570 }
1571}
1572
1573/*----------------------------------------------------------------------------
1574 * turn_off_xlu
1575 *----------------------------------------------------------------------------
1576 * Description: Turns XLU bit off in the proper register
1577 *----------------------------------------------------------------------------
1578 */
1579void turn_off_xlu(pc300_t * card, int ch)
1580{
1581 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1582 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1583 void __iomem *falcbase = card->hw.falcbase;
1584
1585 if (conf->media == IF_IFACE_T1) {
1586 cpc_writeb(falcbase + F_REG(FMR5, ch),
1587 cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLU);
1588 } else {
1589 cpc_writeb(falcbase + F_REG(FMR3, ch),
1590 cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLU);
1591 }
1592}
1593
1594/*----------------------------------------------------------------------------
1595 * turn_off_xld
1596 *----------------------------------------------------------------------------
1597 * Description: Turns XLD bit off in the proper register
1598 *----------------------------------------------------------------------------
1599 */
1600void turn_off_xld(pc300_t * card, int ch)
1601{
1602 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1603 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1604 void __iomem *falcbase = card->hw.falcbase;
1605
1606 if (conf->media == IF_IFACE_T1) {
1607 cpc_writeb(falcbase + F_REG(FMR5, ch),
1608 cpc_readb(falcbase + F_REG(FMR5, ch)) & ~FMR5_XLD);
1609 } else {
1610 cpc_writeb(falcbase + F_REG(FMR3, ch),
1611 cpc_readb(falcbase + F_REG(FMR3, ch)) & ~FMR3_XLD);
1612 }
1613}
1614
1615/*----------------------------------------------------------------------------
1616 * falc_generate_loop_up_code
1617 *----------------------------------------------------------------------------
1618 * Description: This routine writes the proper FALC chip register in order
1619 * to generate a LOOP activation code over a T1/E1 line.
1620 *----------------------------------------------------------------------------
1621 */
1622void falc_generate_loop_up_code(pc300_t * card, int ch)
1623{
1624 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1625 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1626 falc_t *pfalc = (falc_t *) & chan->falc;
1627 void __iomem *falcbase = card->hw.falcbase;
1628
1629 if (conf->media == IF_IFACE_T1) {
1630 cpc_writeb(falcbase + F_REG(FMR5, ch),
1631 cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLU);
1632 } else {
1633 cpc_writeb(falcbase + F_REG(FMR3, ch),
1634 cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLU);
1635 }
1636 // EVENT_FALC_ABNORMAL
1637 if (conf->media == IF_IFACE_T1) {
1638 /* Disable this interrupt as it may otherwise interfere with
1639 * other working boards. */
1640 cpc_writeb(falcbase + F_REG(IMR0, ch),
1641 cpc_readb(falcbase + F_REG(IMR0, ch)) | IMR0_PDEN);
1642 }
1643 falc_disable_comm(card, ch);
1644 // EVENT_FALC_ABNORMAL
1645 pfalc->loop_gen = 1;
1646}
1647
1648/*----------------------------------------------------------------------------
1649 * falc_generate_loop_down_code
1650 *----------------------------------------------------------------------------
1651 * Description: This routine writes the proper FALC chip register in order
1652 * to generate a LOOP deactivation code over a T1/E1 line.
1653 *----------------------------------------------------------------------------
1654 */
1655void falc_generate_loop_down_code(pc300_t * card, int ch)
1656{
1657 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1658 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1659 falc_t *pfalc = (falc_t *) & chan->falc;
1660 void __iomem *falcbase = card->hw.falcbase;
1661
1662 if (conf->media == IF_IFACE_T1) {
1663 cpc_writeb(falcbase + F_REG(FMR5, ch),
1664 cpc_readb(falcbase + F_REG(FMR5, ch)) | FMR5_XLD);
1665 } else {
1666 cpc_writeb(falcbase + F_REG(FMR3, ch),
1667 cpc_readb(falcbase + F_REG(FMR3, ch)) | FMR3_XLD);
1668 }
1669 pfalc->sync = 0;
1670 cpc_writeb(falcbase + card->hw.cpld_reg2,
1671 cpc_readb(falcbase + card->hw.cpld_reg2) &
1672 ~(CPLD_REG2_FALC_LED2 << (2 * ch)));
1673 pfalc->active = 0;
1674//? falc_issue_cmd(card, ch, CMDR_XRES);
1675 pfalc->loop_gen = 0;
1676}
1677
1678/*----------------------------------------------------------------------------
1679 * falc_pattern_test
1680 *----------------------------------------------------------------------------
1681 * Description: This routine generates a pattern code and checks
1682 * it on the reception side.
1683 *----------------------------------------------------------------------------
1684 */
1685void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
1686{
1687 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1688 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
1689 falc_t *pfalc = (falc_t *) & chan->falc;
1690 void __iomem *falcbase = card->hw.falcbase;
1691
1692 if (activate) {
1693 pfalc->prbs = 1;
1694 pfalc->bec = 0;
1695 if (conf->media == IF_IFACE_T1) {
1696 /* Disable local loop activation/deactivation detect */
1697 cpc_writeb(falcbase + F_REG(IMR3, ch),
1698 cpc_readb(falcbase + F_REG(IMR3, ch)) | IMR3_LLBSC);
1699 } else {
1700 /* Disable local loop activation/deactivation detect */
1701 cpc_writeb(falcbase + F_REG(IMR1, ch),
1702 cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_LLBSC);
1703 }
1704 /* Activates generation and monitoring of PRBS
1705 * (Pseudo Random Bit Sequence) */
1706 cpc_writeb(falcbase + F_REG(LCR1, ch),
1707 cpc_readb(falcbase + F_REG(LCR1, ch)) | LCR1_EPRM | LCR1_XPRBS);
1708 } else {
1709 pfalc->prbs = 0;
1710 /* Deactivates generation and monitoring of PRBS
1711 * (Pseudo Random Bit Sequence) */
1712 cpc_writeb(falcbase + F_REG(LCR1, ch),
1713 cpc_readb(falcbase+F_REG(LCR1,ch)) & ~(LCR1_EPRM | LCR1_XPRBS));
1714 if (conf->media == IF_IFACE_T1) {
1715 /* Enable local loop activation/deactivation detect */
1716 cpc_writeb(falcbase + F_REG(IMR3, ch),
1717 cpc_readb(falcbase + F_REG(IMR3, ch)) & ~IMR3_LLBSC);
1718 } else {
1719 /* Enable local loop activation/deactivation detect */
1720 cpc_writeb(falcbase + F_REG(IMR1, ch),
1721 cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_LLBSC);
1722 }
1723 }
1724}
1725
1726/*----------------------------------------------------------------------------
1727 * falc_pattern_test_error
1728 *----------------------------------------------------------------------------
1729 * Description: This routine returns the bit error counter value
1730 *----------------------------------------------------------------------------
1731 */
1732ucshort falc_pattern_test_error(pc300_t * card, int ch)
1733{
1734 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1735 falc_t *pfalc = (falc_t *) & chan->falc;
1736
1737 return (pfalc->bec);
1738}
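
/*
 * Illustration only (not part of the original driver): a minimal sketch of
 * how the two PRBS helpers above are meant to be paired -- start the test,
 * let errors accumulate for a while, sample the bit error counter (which is
 * refilled by falc_update_stats() from the per-second FALC interrupt), then
 * drop back to normal operation.  The one-second busy wait and the function
 * name are arbitrary choices for the sketch; the real user of these helpers
 * is the SIOCSPC300PATTERNTEST ioctl handler further below.
 */
static ucshort __attribute__((unused))
falc_pattern_test_sample(pc300_t * card, int ch)
{
	ucshort errors;

	falc_pattern_test(card, ch, 1);	/* enable PRBS generation/monitoring */
	mdelay(1000);			/* arbitrary measuring interval */
	errors = falc_pattern_test_error(card, ch);
	falc_pattern_test(card, ch, 0);	/* back to normal operation */
	return errors;
}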
1739
1740/**********************************/
1741/*** Net Interface Routines ***/
1742/**********************************/
1743
1744static void
1745cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
1746{
1747 struct sk_buff *skb;
1748
1749 if ((skb = dev_alloc_skb(10 + skb_main->len)) == NULL) {
1750 printk("%s: out of memory\n", dev->name);
1751 return;
1752 }
1753 skb_put(skb, 10 + skb_main->len);
1754
1755 skb->dev = dev;
1756 skb->protocol = htons(ETH_P_CUST);
1757 skb->mac.raw = skb->data;
1758 skb->pkt_type = PACKET_HOST;
1759 skb->len = 10 + skb_main->len;
1760
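	/* 10-byte trace header in front of the copied frame: the first five
	 * characters of the interface name followed by "[R]: " or "[T]: ". */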
1761 memcpy(skb->data, dev->name, 5);
1762 skb->data[5] = '[';
1763 skb->data[6] = rx_tx;
1764 skb->data[7] = ']';
1765 skb->data[8] = ':';
1766 skb->data[9] = ' ';
1767 memcpy(&skb->data[10], skb_main->data, skb_main->len);
1768
1769 netif_rx(skb);
1770}
1771
1772void cpc_tx_timeout(struct net_device *dev)
1773{
1774 pc300dev_t *d = (pc300dev_t *) dev->priv;
1775 pc300ch_t *chan = (pc300ch_t *) d->chan;
1776 pc300_t *card = (pc300_t *) chan->card;
1777 struct net_device_stats *stats = hdlc_stats(dev);
1778 int ch = chan->channel;
1779 unsigned long flags;
1780 ucchar ilar;
1781
1782 stats->tx_errors++;
1783 stats->tx_aborted_errors++;
1784 CPC_LOCK(card, flags);
1785 if ((ilar = cpc_readb(card->hw.scabase + ILAR)) != 0) {
1786 printk("%s: ILAR=0x%x\n", dev->name, ilar);
1787 cpc_writeb(card->hw.scabase + ILAR, ilar);
1788 cpc_writeb(card->hw.scabase + DMER, 0x80);
1789 }
1790 if (card->hw.type == PC300_TE) {
1791 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
1792 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
1793 ~(CPLD_REG2_FALC_LED1 << (2 * ch)));
1794 }
1795 dev->trans_start = jiffies;
1796 CPC_UNLOCK(card, flags);
1797 netif_wake_queue(dev);
1798}
1799
1800int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1801{
1802 pc300dev_t *d = (pc300dev_t *) dev->priv;
1803 pc300ch_t *chan = (pc300ch_t *) d->chan;
1804 pc300_t *card = (pc300_t *) chan->card;
1805 struct net_device_stats *stats = hdlc_stats(dev);
1806 int ch = chan->channel;
1807 unsigned long flags;
1808#ifdef PC300_DEBUG_TX
1809 int i;
1810#endif
1811
1812 if (chan->conf.monitor) {
1813 /* In monitor mode no Tx is done: ignore packet */
1814 dev_kfree_skb(skb);
1815 return 0;
1816 } else if (!netif_carrier_ok(dev)) {
1817 /* DCD must be OFF: drop packet */
1818 dev_kfree_skb(skb);
1819 stats->tx_errors++;
1820 stats->tx_carrier_errors++;
1821 return 0;
1822 } else if (cpc_readb(card->hw.scabase + M_REG(ST3, ch)) & ST3_DCD) {
1823 		printk("%s: DCD is OFF. Going administratively down.\n", dev->name);
1824 stats->tx_errors++;
1825 stats->tx_carrier_errors++;
1826 dev_kfree_skb(skb);
1827 netif_carrier_off(dev);
1828 CPC_LOCK(card, flags);
1829 cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR);
1830 if (card->hw.type == PC300_TE) {
1831 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
1832 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
1833 ~(CPLD_REG2_FALC_LED1 << (2 * ch)));
1834 }
1835 CPC_UNLOCK(card, flags);
1836 netif_wake_queue(dev);
1837 return 0;
1838 }
1839
1840 /* Write buffer to DMA buffers */
1841 if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) {
1842// printk("%s: write error. Dropping TX packet.\n", dev->name);
1843 netif_stop_queue(dev);
1844 dev_kfree_skb(skb);
1845 stats->tx_errors++;
1846 stats->tx_dropped++;
1847 return 0;
1848 }
1849#ifdef PC300_DEBUG_TX
1850 printk("%s T:", dev->name);
1851 for (i = 0; i < skb->len; i++)
1852 printk(" %02x", *(skb->data + i));
1853 printk("\n");
1854#endif
1855
1856 if (d->trace_on) {
1857 cpc_trace(dev, skb, 'T');
1858 }
1859 dev->trans_start = jiffies;
1860
1861 /* Start transmission */
1862 CPC_LOCK(card, flags);
1863 	/* check whether more than one TX descriptor is still free */
1864 	if (card->chan[ch].nfree_tx_bd <= 1) {
1865 		/* at most one descriptor left: stop the queue */
1866 netif_stop_queue(dev);
1867 }
1868 cpc_writel(card->hw.scabase + DTX_REG(EDAL, ch),
1869 TX_BD_ADDR(ch, chan->tx_next_bd));
1870 cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA);
1871 cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE);
1872 if (card->hw.type == PC300_TE) {
1873 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
1874 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
1875 (CPLD_REG2_FALC_LED1 << (2 * ch)));
1876 }
1877 CPC_UNLOCK(card, flags);
1878 dev_kfree_skb(skb);
1879
1880 return 0;
1881}
1882
1883void cpc_net_rx(struct net_device *dev)
1884{
1885 pc300dev_t *d = (pc300dev_t *) dev->priv;
1886 pc300ch_t *chan = (pc300ch_t *) d->chan;
1887 pc300_t *card = (pc300_t *) chan->card;
1888 struct net_device_stats *stats = hdlc_stats(dev);
1889 int ch = chan->channel;
1890#ifdef PC300_DEBUG_RX
1891 int i;
1892#endif
1893 int rxb;
1894 struct sk_buff *skb;
1895
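	/* Drain every completed frame from the RX ring: dma_get_rx_frame_size()
	 * returns -1 once nothing more is pending, and a negative value from
	 * dma_buf_read() carries the DST_* status bits of a bad frame. */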
1896 while (1) {
1897 if ((rxb = dma_get_rx_frame_size(card, ch)) == -1)
1898 return;
1899
1900 if (!netif_carrier_ok(dev)) {
1901 /* DCD must be OFF: drop packet */
1902 printk("%s : DCD is OFF - drop %d rx bytes\n", dev->name, rxb);
1903 skb = NULL;
1904 } else {
1905 if (rxb > (dev->mtu + 40)) { /* add headers */
1906 printk("%s : MTU exceeded %d\n", dev->name, rxb);
1907 skb = NULL;
1908 } else {
1909 skb = dev_alloc_skb(rxb);
1910 if (skb == NULL) {
1911 printk("%s: Memory squeeze!!\n", dev->name);
1912 return;
1913 }
1914 skb->dev = dev;
1915 }
1916 }
1917
1918 if (((rxb = dma_buf_read(card, ch, skb)) <= 0) || (skb == NULL)) {
1919#ifdef PC300_DEBUG_RX
1920 printk("%s: rxb = %x\n", dev->name, rxb);
1921#endif
1922 if ((skb == NULL) && (rxb > 0)) {
1923 /* rxb > dev->mtu */
1924 stats->rx_errors++;
1925 stats->rx_length_errors++;
1926 continue;
1927 }
1928
1929 if (rxb < 0) { /* Invalid frame */
1930 rxb = -rxb;
1931 if (rxb & DST_OVR) {
1932 stats->rx_errors++;
1933 stats->rx_fifo_errors++;
1934 }
1935 if (rxb & DST_CRC) {
1936 stats->rx_errors++;
1937 stats->rx_crc_errors++;
1938 }
1939 if (rxb & (DST_RBIT | DST_SHRT | DST_ABT)) {
1940 stats->rx_errors++;
1941 stats->rx_frame_errors++;
1942 }
1943 }
1944 if (skb) {
1945 dev_kfree_skb_irq(skb);
1946 }
1947 continue;
1948 }
1949
1950 stats->rx_bytes += rxb;
1951
1952#ifdef PC300_DEBUG_RX
1953 printk("%s R:", dev->name);
1954 for (i = 0; i < skb->len; i++)
1955 printk(" %02x", *(skb->data + i));
1956 printk("\n");
1957#endif
1958 if (d->trace_on) {
1959 cpc_trace(dev, skb, 'R');
1960 }
1961 stats->rx_packets++;
1962 skb->protocol = hdlc_type_trans(skb, dev);
1963 netif_rx(skb);
1964 }
1965}
1966
1967/************************************/
1968/*** PC300 Interrupt Routines ***/
1969/************************************/
1970static void sca_tx_intr(pc300dev_t *dev)
1971{
1972 pc300ch_t *chan = (pc300ch_t *)dev->chan;
1973 pc300_t *card = (pc300_t *)chan->card;
1974 int ch = chan->channel;
1975 volatile pcsca_bd_t __iomem * ptdescr;
1976 struct net_device_stats *stats = hdlc_stats(dev->dev);
1977
1978 /* Clean up descriptors from previous transmission */
1979 ptdescr = (card->hw.rambase +
1980 TX_BD_ADDR(ch,chan->tx_first_bd));
1981 while ((cpc_readl(card->hw.scabase + DTX_REG(CDAL,ch)) !=
1982 TX_BD_ADDR(ch,chan->tx_first_bd)) &&
1983 (cpc_readb(&ptdescr->status) & DST_OSB)) {
1984 stats->tx_packets++;
1985 stats->tx_bytes += cpc_readw(&ptdescr->len);
1986 cpc_writeb(&ptdescr->status, DST_OSB);
1987 cpc_writew(&ptdescr->len, 0);
1988 chan->nfree_tx_bd++;
1989 chan->tx_first_bd = (chan->tx_first_bd + 1) & (N_DMA_TX_BUF - 1);
1990 ptdescr = (card->hw.rambase + TX_BD_ADDR(ch,chan->tx_first_bd));
1991 }
1992
1993#ifdef CONFIG_PC300_MLPPP
1994 if (chan->conf.proto == PC300_PROTO_MLPPP) {
1995 cpc_tty_trigger_poll(dev);
1996 } else {
1997#endif
1998 /* Tell the upper layer we are ready to transmit more packets */
1999 netif_wake_queue(dev->dev);
2000#ifdef CONFIG_PC300_MLPPP
2001 }
2002#endif
2003}
2004
2005static void sca_intr(pc300_t * card)
2006{
2007 void __iomem *scabase = card->hw.scabase;
2008 volatile uclong status;
2009 int ch;
2010 int intr_count = 0;
2011 unsigned char dsr_rx;
2012
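	/* ISR0 is re-read until it drops to zero; every pass walks all the
	 * channels and services the RX DMA, TX DMA and MSCI (modem status)
	 * causes in turn.  intr_count caps the work done per invocation so a
	 * busy board cannot monopolize the interrupt. */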
2013 while ((status = cpc_readl(scabase + ISR0)) != 0) {
2014 for (ch = 0; ch < card->hw.nchan; ch++) {
2015 pc300ch_t *chan = &card->chan[ch];
2016 pc300dev_t *d = &chan->d;
2017 struct net_device *dev = d->dev;
2018 hdlc_device *hdlc = dev_to_hdlc(dev);
2019
2020 spin_lock(&card->card_lock);
2021
2022 /**** Reception ****/
2023 if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) {
2024 ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch));
2025
2026 /* Clear RX interrupts */
2027 cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE);
2028
2029#ifdef PC300_DEBUG_INTR
2030 printk ("sca_intr: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n",
2031 ch, status, drx_stat);
2032#endif
2033 if (status & IR0_DRX(IR0_DMIA, ch)) {
2034 if (drx_stat & DSR_BOF) {
2035#ifdef CONFIG_PC300_MLPPP
2036 if (chan->conf.proto == PC300_PROTO_MLPPP) {
2037 /* verify if driver is TTY */
2038 if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
2039 rx_dma_stop(card, ch);
2040 }
2041 cpc_tty_receive(d);
2042 rx_dma_start(card, ch);
2043 } else
2044#endif
2045 {
2046 if ((cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
2047 rx_dma_stop(card, ch);
2048 }
2049 cpc_net_rx(dev);
2050 /* Discard invalid frames */
2051 hdlc->stats.rx_errors++;
2052 hdlc->stats.rx_over_errors++;
2053 chan->rx_first_bd = 0;
2054 chan->rx_last_bd = N_DMA_RX_BUF - 1;
2055 rx_dma_start(card, ch);
2056 }
2057 }
2058 }
2059 if (status & IR0_DRX(IR0_DMIB, ch)) {
2060 if (drx_stat & DSR_EOM) {
2061 if (card->hw.type == PC300_TE) {
2062 cpc_writeb(card->hw.falcbase +
2063 card->hw.cpld_reg2,
2064 cpc_readb (card->hw.falcbase +
2065 card->hw.cpld_reg2) |
2066 (CPLD_REG2_FALC_LED1 << (2 * ch)));
2067 }
2068#ifdef CONFIG_PC300_MLPPP
2069 if (chan->conf.proto == PC300_PROTO_MLPPP) {
2070 /* verify if driver is TTY */
2071 cpc_tty_receive(d);
2072 } else {
2073 cpc_net_rx(dev);
2074 }
2075#else
2076 cpc_net_rx(dev);
2077#endif
2078 if (card->hw.type == PC300_TE) {
2079 cpc_writeb(card->hw.falcbase +
2080 card->hw.cpld_reg2,
2081 cpc_readb (card->hw.falcbase +
2082 card->hw.cpld_reg2) &
2083 ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
2084 }
2085 }
2086 }
2087 if (!(dsr_rx = cpc_readb(scabase + DSR_RX(ch)) & DSR_DE)) {
2088#ifdef PC300_DEBUG_INTR
2089 printk("%s: RX intr chan[%d] (st=0x%08lx, dsr=0x%02x, dsr2=0x%02x)\n",
2090 dev->name, ch, status, drx_stat, dsr_rx);
2091#endif
2092 cpc_writeb(scabase + DSR_RX(ch), (dsr_rx | DSR_DE) & 0xfe);
2093 }
2094 }
2095
2096 /**** Transmission ****/
2097 if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) {
2098 ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch));
2099
2100 /* Clear TX interrupts */
2101 cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE);
2102
2103#ifdef PC300_DEBUG_INTR
2104 printk ("sca_intr: TX intr chan[%d] (st=0x%08lx, dsr=0x%02x)\n",
2105 ch, status, dtx_stat);
2106#endif
2107 if (status & IR0_DTX(IR0_EFT, ch)) {
2108 if (dtx_stat & DSR_UDRF) {
2109 if (cpc_readb (scabase + M_REG(TBN, ch)) != 0) {
2110 cpc_writeb(scabase + M_REG(CMD,ch), CMD_TX_BUF_CLR);
2111 }
2112 if (card->hw.type == PC300_TE) {
2113 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
2114 cpc_readb (card->hw.falcbase +
2115 card->hw.cpld_reg2) &
2116 ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
2117 }
2118 hdlc->stats.tx_errors++;
2119 hdlc->stats.tx_fifo_errors++;
2120 sca_tx_intr(d);
2121 }
2122 }
2123 if (status & IR0_DTX(IR0_DMIA, ch)) {
2124 if (dtx_stat & DSR_BOF) {
2125 }
2126 }
2127 if (status & IR0_DTX(IR0_DMIB, ch)) {
2128 if (dtx_stat & DSR_EOM) {
2129 if (card->hw.type == PC300_TE) {
2130 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
2131 cpc_readb (card->hw.falcbase +
2132 card->hw.cpld_reg2) &
2133 ~ (CPLD_REG2_FALC_LED1 << (2 * ch)));
2134 }
2135 sca_tx_intr(d);
2136 }
2137 }
2138 }
2139
2140 /**** MSCI ****/
2141 if (status & IR0_M(IR0_RXINTA, ch)) {
2142 ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch));
2143
2144 /* Clear MSCI interrupts */
2145 cpc_writeb(scabase + M_REG(ST1, ch), st1);
2146
2147#ifdef PC300_DEBUG_INTR
2148 printk("sca_intr: MSCI intr chan[%d] (st=0x%08lx, st1=0x%02x)\n",
2149 ch, status, st1);
2150#endif
2151 if (st1 & ST1_CDCD) { /* DCD changed */
2152 if (cpc_readb(scabase + M_REG(ST3, ch)) & ST3_DCD) {
2153 					printk ("%s: DCD is OFF. Going administratively down.\n",
2154 dev->name);
2155#ifdef CONFIG_PC300_MLPPP
2156 if (chan->conf.proto != PC300_PROTO_MLPPP) {
2157 netif_carrier_off(dev);
2158 }
2159#else
2160 netif_carrier_off(dev);
2161
2162#endif
2163 card->chan[ch].d.line_off++;
2164 } else { /* DCD = 1 */
2165 					printk ("%s: DCD is ON. Going administratively up.\n",
2166 dev->name);
2167#ifdef CONFIG_PC300_MLPPP
2168 if (chan->conf.proto != PC300_PROTO_MLPPP)
2169 /* verify if driver is not TTY */
2170#endif
2171 netif_carrier_on(dev);
2172 card->chan[ch].d.line_on++;
2173 }
2174 }
2175 }
2176 spin_unlock(&card->card_lock);
2177 }
2178 if (++intr_count == 10)
2179 			/* Too much work on this board. Force exit */
2180 break;
2181 }
2182}
2183
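/*
 * In-band loop code detection (T1 reads FRS1, E1 reads RSP): an incoming
 * Line Loop Back Activation code switches the remote loop on and a
 * Deactivation code switches it off again, but only while this end is
 * neither generating a loop code itself (loop_gen) nor transmitting the
 * PRBS test pattern (LCR1_XPRBS).
 */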
2184static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1)
2185{
2186 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2187 falc_t *pfalc = (falc_t *) & chan->falc;
2188 void __iomem *falcbase = card->hw.falcbase;
2189
2190 if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) &&
2191 !pfalc->loop_gen) {
2192 if (frs1 & FRS1_LLBDD) {
2193 // A Line Loop Back Deactivation signal detected
2194 if (pfalc->loop_active) {
2195 falc_remote_loop(card, ch, 0);
2196 }
2197 } else {
2198 if ((frs1 & FRS1_LLBAD) &&
2199 ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) {
2200 // A Line Loop Back Activation signal detected
2201 if (!pfalc->loop_active) {
2202 falc_remote_loop(card, ch, 1);
2203 }
2204 }
2205 }
2206 }
2207}
2208
2209static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp)
2210{
2211 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2212 falc_t *pfalc = (falc_t *) & chan->falc;
2213 void __iomem *falcbase = card->hw.falcbase;
2214
2215 if (((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_XPRBS) == 0) &&
2216 !pfalc->loop_gen) {
2217 if (rsp & RSP_LLBDD) {
2218 // A Line Loop Back Deactivation signal detected
2219 if (pfalc->loop_active) {
2220 falc_remote_loop(card, ch, 0);
2221 }
2222 } else {
2223 if ((rsp & RSP_LLBAD) &&
2224 ((cpc_readb(falcbase + F_REG(LCR1, ch)) & LCR1_EPRM) == 0)) {
2225 // A Line Loop Back Activation signal detected
2226 if (!pfalc->loop_active) {
2227 falc_remote_loop(card, ch, 1);
2228 }
2229 }
2230 }
2231 }
2232}
2233
2234static void falc_t1_intr(pc300_t * card, int ch)
2235{
2236 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2237 falc_t *pfalc = (falc_t *) & chan->falc;
2238 void __iomem *falcbase = card->hw.falcbase;
2239 ucchar isr0, isr3, gis;
2240 ucchar dummy;
2241
2242 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
2243 if (gis & GIS_ISR0) {
2244 isr0 = cpc_readb(falcbase + F_REG(FISR0, ch));
2245 if (isr0 & FISR0_PDEN) {
2246 /* Read the bit to clear the situation */
2247 if (cpc_readb(falcbase + F_REG(FRS1, ch)) &
2248 FRS1_PDEN) {
2249 pfalc->pden++;
2250 }
2251 }
2252 }
2253
2254 if (gis & GIS_ISR1) {
2255 dummy = cpc_readb(falcbase + F_REG(FISR1, ch));
2256 }
2257
2258 if (gis & GIS_ISR2) {
2259 dummy = cpc_readb(falcbase + F_REG(FISR2, ch));
2260 }
2261
2262 if (gis & GIS_ISR3) {
2263 isr3 = cpc_readb(falcbase + F_REG(FISR3, ch));
2264 if (isr3 & FISR3_SEC) {
2265 pfalc->sec++;
2266 falc_update_stats(card, ch);
2267 falc_check_status(card, ch,
2268 cpc_readb(falcbase + F_REG(FRS0, ch)));
2269 }
2270 if (isr3 & FISR3_ES) {
2271 pfalc->es++;
2272 }
2273 if (isr3 & FISR3_LLBSC) {
2274 falc_t1_loop_detection(card, ch,
2275 cpc_readb(falcbase + F_REG(FRS1, ch)));
2276 }
2277 }
2278 }
2279}
2280
2281static void falc_e1_intr(pc300_t * card, int ch)
2282{
2283 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
2284 falc_t *pfalc = (falc_t *) & chan->falc;
2285 void __iomem *falcbase = card->hw.falcbase;
2286 ucchar isr1, isr2, isr3, gis, rsp;
2287 ucchar dummy;
2288
2289 while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) {
2290 rsp = cpc_readb(falcbase + F_REG(RSP, ch));
2291
2292 if (gis & GIS_ISR0) {
2293 dummy = cpc_readb(falcbase + F_REG(FISR0, ch));
2294 }
2295 if (gis & GIS_ISR1) {
2296 isr1 = cpc_readb(falcbase + F_REG(FISR1, ch));
2297 if (isr1 & FISR1_XMB) {
2298 if ((pfalc->xmb_cause & 2)
2299 && pfalc->multiframe_mode) {
2300 if (cpc_readb (falcbase + F_REG(FRS0, ch)) &
2301 (FRS0_LOS | FRS0_AIS | FRS0_LFA)) {
2302 cpc_writeb(falcbase + F_REG(XSP, ch),
2303 cpc_readb(falcbase + F_REG(XSP, ch))
2304 & ~XSP_AXS);
2305 } else {
2306 cpc_writeb(falcbase + F_REG(XSP, ch),
2307 cpc_readb(falcbase + F_REG(XSP, ch))
2308 | XSP_AXS);
2309 }
2310 }
2311 pfalc->xmb_cause = 0;
2312 cpc_writeb(falcbase + F_REG(IMR1, ch),
2313 cpc_readb(falcbase + F_REG(IMR1, ch)) | IMR1_XMB);
2314 }
2315 if (isr1 & FISR1_LLBSC) {
2316 falc_e1_loop_detection(card, ch, rsp);
2317 }
2318 }
2319 if (gis & GIS_ISR2) {
2320 isr2 = cpc_readb(falcbase + F_REG(FISR2, ch));
2321 if (isr2 & FISR2_T400MS) {
2322 cpc_writeb(falcbase + F_REG(XSW, ch),
2323 cpc_readb(falcbase + F_REG(XSW, ch)) | XSW_XRA);
2324 }
2325 if (isr2 & FISR2_MFAR) {
2326 cpc_writeb(falcbase + F_REG(XSW, ch),
2327 cpc_readb(falcbase + F_REG(XSW, ch)) & ~XSW_XRA);
2328 }
2329 if (isr2 & (FISR2_FAR | FISR2_LFA | FISR2_AIS | FISR2_LOS)) {
2330 pfalc->xmb_cause |= 2;
2331 cpc_writeb(falcbase + F_REG(IMR1, ch),
2332 cpc_readb(falcbase + F_REG(IMR1, ch)) & ~IMR1_XMB);
2333 }
2334 }
2335 if (gis & GIS_ISR3) {
2336 isr3 = cpc_readb(falcbase + F_REG(FISR3, ch));
2337 if (isr3 & FISR3_SEC) {
2338 pfalc->sec++;
2339 falc_update_stats(card, ch);
2340 falc_check_status(card, ch,
2341 cpc_readb(falcbase + F_REG(FRS0, ch)));
2342 }
2343 if (isr3 & FISR3_ES) {
2344 pfalc->es++;
2345 }
2346 }
2347 }
2348}
2349
2350static void falc_intr(pc300_t * card)
2351{
2352 int ch;
2353
2354 for (ch = 0; ch < card->hw.nchan; ch++) {
2355 pc300ch_t *chan = &card->chan[ch];
2356 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
2357
2358 if (conf->media == IF_IFACE_T1) {
2359 falc_t1_intr(card, ch);
2360 } else {
2361 falc_e1_intr(card, ch);
2362 }
2363 }
2364}
2365
2366static irqreturn_t cpc_intr(int irq, void *dev_id, struct pt_regs *regs)
2367{
2368 pc300_t *card;
2369 volatile ucchar plx_status;
2370
2371 if ((card = (pc300_t *) dev_id) == 0) {
2372#ifdef PC300_DEBUG_INTR
2373 printk("cpc_intr: spurious intr %d\n", irq);
2374#endif
2375 return IRQ_NONE; /* spurious intr */
2376 }
2377
2378 if (card->hw.rambase == 0) {
2379#ifdef PC300_DEBUG_INTR
2380 printk("cpc_intr: spurious intr2 %d\n", irq);
2381#endif
2382 return IRQ_NONE; /* spurious intr */
2383 }
2384
2385 switch (card->hw.type) {
2386 case PC300_RSV:
2387 case PC300_X21:
2388 sca_intr(card);
2389 break;
2390
2391 case PC300_TE:
2392 while ( (plx_status = (cpc_readb(card->hw.plxbase + card->hw.intctl_reg) &
2393 (PLX_9050_LINT1_STATUS | PLX_9050_LINT2_STATUS))) != 0) {
2394 if (plx_status & PLX_9050_LINT1_STATUS) { /* SCA Interrupt */
2395 sca_intr(card);
2396 }
2397 if (plx_status & PLX_9050_LINT2_STATUS) { /* FALC Interrupt */
2398 falc_intr(card);
2399 }
2400 }
2401 break;
2402 }
2403 return IRQ_HANDLED;
2404}
2405
2406void cpc_sca_status(pc300_t * card, int ch)
2407{
2408 ucchar ilar;
2409 void __iomem *scabase = card->hw.scabase;
2410 unsigned long flags;
2411
2412 tx_dma_buf_check(card, ch);
2413 rx_dma_buf_check(card, ch);
2414 ilar = cpc_readb(scabase + ILAR);
2415 printk ("ILAR=0x%02x, WCRL=0x%02x, PCR=0x%02x, BTCR=0x%02x, BOLR=0x%02x\n",
2416 ilar, cpc_readb(scabase + WCRL), cpc_readb(scabase + PCR),
2417 cpc_readb(scabase + BTCR), cpc_readb(scabase + BOLR));
2418 printk("TX_CDA=0x%08x, TX_EDA=0x%08x\n",
2419 cpc_readl(scabase + DTX_REG(CDAL, ch)),
2420 cpc_readl(scabase + DTX_REG(EDAL, ch)));
2421 printk("RX_CDA=0x%08x, RX_EDA=0x%08x, BFL=0x%04x\n",
2422 cpc_readl(scabase + DRX_REG(CDAL, ch)),
2423 cpc_readl(scabase + DRX_REG(EDAL, ch)),
2424 cpc_readw(scabase + DRX_REG(BFLL, ch)));
2425 printk("DMER=0x%02x, DSR_TX=0x%02x, DSR_RX=0x%02x\n",
2426 cpc_readb(scabase + DMER), cpc_readb(scabase + DSR_TX(ch)),
2427 cpc_readb(scabase + DSR_RX(ch)));
2428 printk("DMR_TX=0x%02x, DMR_RX=0x%02x, DIR_TX=0x%02x, DIR_RX=0x%02x\n",
2429 cpc_readb(scabase + DMR_TX(ch)), cpc_readb(scabase + DMR_RX(ch)),
2430 cpc_readb(scabase + DIR_TX(ch)),
2431 cpc_readb(scabase + DIR_RX(ch)));
2432 printk("DCR_TX=0x%02x, DCR_RX=0x%02x, FCT_TX=0x%02x, FCT_RX=0x%02x\n",
2433 cpc_readb(scabase + DCR_TX(ch)), cpc_readb(scabase + DCR_RX(ch)),
2434 cpc_readb(scabase + FCT_TX(ch)),
2435 cpc_readb(scabase + FCT_RX(ch)));
2436 printk("MD0=0x%02x, MD1=0x%02x, MD2=0x%02x, MD3=0x%02x, IDL=0x%02x\n",
2437 cpc_readb(scabase + M_REG(MD0, ch)),
2438 cpc_readb(scabase + M_REG(MD1, ch)),
2439 cpc_readb(scabase + M_REG(MD2, ch)),
2440 cpc_readb(scabase + M_REG(MD3, ch)),
2441 cpc_readb(scabase + M_REG(IDL, ch)));
2442 printk("CMD=0x%02x, SA0=0x%02x, SA1=0x%02x, TFN=0x%02x, CTL=0x%02x\n",
2443 cpc_readb(scabase + M_REG(CMD, ch)),
2444 cpc_readb(scabase + M_REG(SA0, ch)),
2445 cpc_readb(scabase + M_REG(SA1, ch)),
2446 cpc_readb(scabase + M_REG(TFN, ch)),
2447 cpc_readb(scabase + M_REG(CTL, ch)));
2448 printk("ST0=0x%02x, ST1=0x%02x, ST2=0x%02x, ST3=0x%02x, ST4=0x%02x\n",
2449 cpc_readb(scabase + M_REG(ST0, ch)),
2450 cpc_readb(scabase + M_REG(ST1, ch)),
2451 cpc_readb(scabase + M_REG(ST2, ch)),
2452 cpc_readb(scabase + M_REG(ST3, ch)),
2453 cpc_readb(scabase + M_REG(ST4, ch)));
2454 printk ("CST0=0x%02x, CST1=0x%02x, CST2=0x%02x, CST3=0x%02x, FST=0x%02x\n",
2455 cpc_readb(scabase + M_REG(CST0, ch)),
2456 cpc_readb(scabase + M_REG(CST1, ch)),
2457 cpc_readb(scabase + M_REG(CST2, ch)),
2458 cpc_readb(scabase + M_REG(CST3, ch)),
2459 cpc_readb(scabase + M_REG(FST, ch)));
2460 printk("TRC0=0x%02x, TRC1=0x%02x, RRC=0x%02x, TBN=0x%02x, RBN=0x%02x\n",
2461 cpc_readb(scabase + M_REG(TRC0, ch)),
2462 cpc_readb(scabase + M_REG(TRC1, ch)),
2463 cpc_readb(scabase + M_REG(RRC, ch)),
2464 cpc_readb(scabase + M_REG(TBN, ch)),
2465 cpc_readb(scabase + M_REG(RBN, ch)));
2466 printk("TFS=0x%02x, TNR0=0x%02x, TNR1=0x%02x, RNR=0x%02x\n",
2467 cpc_readb(scabase + M_REG(TFS, ch)),
2468 cpc_readb(scabase + M_REG(TNR0, ch)),
2469 cpc_readb(scabase + M_REG(TNR1, ch)),
2470 cpc_readb(scabase + M_REG(RNR, ch)));
2471 printk("TCR=0x%02x, RCR=0x%02x, TNR1=0x%02x, RNR=0x%02x\n",
2472 cpc_readb(scabase + M_REG(TCR, ch)),
2473 cpc_readb(scabase + M_REG(RCR, ch)),
2474 cpc_readb(scabase + M_REG(TNR1, ch)),
2475 cpc_readb(scabase + M_REG(RNR, ch)));
2476 printk("TXS=0x%02x, RXS=0x%02x, EXS=0x%02x, TMCT=0x%02x, TMCR=0x%02x\n",
2477 cpc_readb(scabase + M_REG(TXS, ch)),
2478 cpc_readb(scabase + M_REG(RXS, ch)),
2479 cpc_readb(scabase + M_REG(EXS, ch)),
2480 cpc_readb(scabase + M_REG(TMCT, ch)),
2481 cpc_readb(scabase + M_REG(TMCR, ch)));
2482 printk("IE0=0x%02x, IE1=0x%02x, IE2=0x%02x, IE4=0x%02x, FIE=0x%02x\n",
2483 cpc_readb(scabase + M_REG(IE0, ch)),
2484 cpc_readb(scabase + M_REG(IE1, ch)),
2485 cpc_readb(scabase + M_REG(IE2, ch)),
2486 cpc_readb(scabase + M_REG(IE4, ch)),
2487 cpc_readb(scabase + M_REG(FIE, ch)));
2488 printk("IER0=0x%08x\n", cpc_readl(scabase + IER0));
2489
2490 if (ilar != 0) {
2491 CPC_LOCK(card, flags);
2492 cpc_writeb(scabase + ILAR, ilar);
2493 cpc_writeb(scabase + DMER, 0x80);
2494 CPC_UNLOCK(card, flags);
2495 }
2496}
2497
2498void cpc_falc_status(pc300_t * card, int ch)
2499{
2500 pc300ch_t *chan = &card->chan[ch];
2501 falc_t *pfalc = (falc_t *) & chan->falc;
2502 unsigned long flags;
2503
2504 CPC_LOCK(card, flags);
2505 printk("CH%d: %s %s %d channels\n",
2506 ch, (pfalc->sync ? "SYNC" : ""), (pfalc->active ? "ACTIVE" : ""),
2507 pfalc->num_channels);
2508
2509 printk(" pden=%d, los=%d, losr=%d, lfa=%d, farec=%d\n",
2510 pfalc->pden, pfalc->los, pfalc->losr, pfalc->lfa, pfalc->farec);
2511 printk(" lmfa=%d, ais=%d, sec=%d, es=%d, rai=%d\n",
2512 pfalc->lmfa, pfalc->ais, pfalc->sec, pfalc->es, pfalc->rai);
2513 printk(" bec=%d, fec=%d, cvc=%d, cec=%d, ebc=%d\n",
2514 pfalc->bec, pfalc->fec, pfalc->cvc, pfalc->cec, pfalc->ebc);
2515
2516 printk("\n");
2517 printk(" STATUS: %s %s %s %s %s %s\n",
2518 (pfalc->red_alarm ? "RED" : ""),
2519 (pfalc->blue_alarm ? "BLU" : ""),
2520 (pfalc->yellow_alarm ? "YEL" : ""),
2521 (pfalc->loss_fa ? "LFA" : ""),
2522 (pfalc->loss_mfa ? "LMF" : ""), (pfalc->prbs ? "PRB" : ""));
2523 CPC_UNLOCK(card, flags);
2524}
2525
2526int cpc_change_mtu(struct net_device *dev, int new_mtu)
2527{
2528 if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU))
2529 return -EINVAL;
2530 dev->mtu = new_mtu;
2531 return 0;
2532}
2533
2534int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2535{
2536 hdlc_device *hdlc = dev_to_hdlc(dev);
2537 pc300dev_t *d = (pc300dev_t *) dev->priv;
2538 pc300ch_t *chan = (pc300ch_t *) d->chan;
2539 pc300_t *card = (pc300_t *) chan->card;
2540 pc300conf_t conf_aux;
2541 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
2542 int ch = chan->channel;
2543 void __user *arg = ifr->ifr_data;
2544 struct if_settings *settings = &ifr->ifr_settings;
2545 void __iomem *scabase = card->hw.scabase;
2546
2547 if (!capable(CAP_NET_ADMIN))
2548 return -EPERM;
2549
2550 switch (cmd) {
2551 case SIOCGPC300CONF:
2552#ifdef CONFIG_PC300_MLPPP
2553 if (conf->proto != PC300_PROTO_MLPPP) {
2554 conf->proto = hdlc->proto.id;
2555 }
2556#else
2557 conf->proto = hdlc->proto.id;
2558#endif
2559 memcpy(&conf_aux.conf, conf, sizeof(pc300chconf_t));
2560 memcpy(&conf_aux.hw, &card->hw, sizeof(pc300hw_t));
2561 if (!arg ||
2562 copy_to_user(arg, &conf_aux, sizeof(pc300conf_t)))
2563 return -EINVAL;
2564 return 0;
2565 case SIOCSPC300CONF:
2566 if (!capable(CAP_NET_ADMIN))
2567 return -EPERM;
2568 if (!arg ||
2569 copy_from_user(&conf_aux.conf, arg, sizeof(pc300chconf_t)))
2570 return -EINVAL;
2571 if (card->hw.cpld_id < 0x02 &&
2572 conf_aux.conf.fr_mode == PC300_FR_UNFRAMED) {
2573 /* CPLD_ID < 0x02 doesn't support Unframed E1 */
2574 return -EINVAL;
2575 }
2576#ifdef CONFIG_PC300_MLPPP
2577 if (conf_aux.conf.proto == PC300_PROTO_MLPPP) {
2578 if (conf->proto != PC300_PROTO_MLPPP) {
2579 memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
2580 cpc_tty_init(d); /* init TTY driver */
2581 }
2582 } else {
2583 if (conf_aux.conf.proto == 0xffff) {
2584 if (conf->proto == PC300_PROTO_MLPPP){
2585 /* ifdown interface */
2586 cpc_close(dev);
2587 }
2588 } else {
2589 memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
2590 hdlc->proto.id = conf->proto;
2591 }
2592 }
2593#else
2594 memcpy(conf, &conf_aux.conf, sizeof(pc300chconf_t));
2595 hdlc->proto.id = conf->proto;
2596#endif
2597 return 0;
2598 case SIOCGPC300STATUS:
2599 cpc_sca_status(card, ch);
2600 return 0;
2601 case SIOCGPC300FALCSTATUS:
2602 cpc_falc_status(card, ch);
2603 return 0;
2604
2605 case SIOCGPC300UTILSTATS:
2606 {
2607 if (!arg) { /* clear statistics */
2608 memset(&hdlc->stats, 0, sizeof(struct net_device_stats));
2609 if (card->hw.type == PC300_TE) {
2610 memset(&chan->falc, 0, sizeof(falc_t));
2611 }
2612 } else {
2613 pc300stats_t pc300stats;
2614
2615 memset(&pc300stats, 0, sizeof(pc300stats_t));
2616 pc300stats.hw_type = card->hw.type;
2617 pc300stats.line_on = card->chan[ch].d.line_on;
2618 pc300stats.line_off = card->chan[ch].d.line_off;
2619 memcpy(&pc300stats.gen_stats, &hdlc->stats,
2620 sizeof(struct net_device_stats));
2621 if (card->hw.type == PC300_TE)
2622 memcpy(&pc300stats.te_stats,&chan->falc,sizeof(falc_t));
2623 if (copy_to_user(arg, &pc300stats, sizeof(pc300stats_t)))
2624 return -EFAULT;
2625 }
2626 return 0;
2627 }
2628
2629 case SIOCGPC300UTILSTATUS:
2630 {
2631 struct pc300status pc300status;
2632
2633 pc300status.hw_type = card->hw.type;
2634 if (card->hw.type == PC300_TE) {
2635 pc300status.te_status.sync = chan->falc.sync;
2636 pc300status.te_status.red_alarm = chan->falc.red_alarm;
2637 pc300status.te_status.blue_alarm = chan->falc.blue_alarm;
2638 pc300status.te_status.loss_fa = chan->falc.loss_fa;
2639 pc300status.te_status.yellow_alarm =chan->falc.yellow_alarm;
2640 pc300status.te_status.loss_mfa = chan->falc.loss_mfa;
2641 pc300status.te_status.prbs = chan->falc.prbs;
2642 } else {
2643 pc300status.gen_status.dcd =
2644 !(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_DCD);
2645 pc300status.gen_status.cts =
2646 !(cpc_readb (scabase + M_REG(ST3, ch)) & ST3_CTS);
2647 pc300status.gen_status.rts =
2648 !(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_RTS);
2649 pc300status.gen_status.dtr =
2650 !(cpc_readb (scabase + M_REG(CTL, ch)) & CTL_DTR);
2651 /* There is no DSR in HD64572 */
2652 }
2653 if (!arg
2654 || copy_to_user(arg, &pc300status, sizeof(pc300status_t)))
2655 return -EINVAL;
2656 return 0;
2657 }
2658
2659 case SIOCSPC300TRACE:
2660 /* Sets/resets a trace_flag for the respective device */
2661 if (!arg || copy_from_user(&d->trace_on, arg,sizeof(unsigned char)))
2662 return -EINVAL;
2663 return 0;
2664
2665 case SIOCSPC300LOOPBACK:
2666 {
2667 struct pc300loopback pc300loop;
2668
2669 /* TE boards only */
2670 if (card->hw.type != PC300_TE)
2671 return -EINVAL;
2672
2673 if (!arg ||
2674 copy_from_user(&pc300loop, arg, sizeof(pc300loopback_t)))
2675 return -EINVAL;
2676 switch (pc300loop.loop_type) {
2677 case PC300LOCLOOP: /* Turn the local loop on/off */
2678 falc_local_loop(card, ch, pc300loop.loop_on);
2679 return 0;
2680
2681 case PC300REMLOOP: /* Turn the remote loop on/off */
2682 falc_remote_loop(card, ch, pc300loop.loop_on);
2683 return 0;
2684
2685 case PC300PAYLOADLOOP: /* Turn the payload loop on/off */
2686 falc_payload_loop(card, ch, pc300loop.loop_on);
2687 return 0;
2688
2689 case PC300GENLOOPUP: /* Generate loop UP */
2690 if (pc300loop.loop_on) {
2691 falc_generate_loop_up_code (card, ch);
2692 } else {
2693 turn_off_xlu(card, ch);
2694 }
2695 return 0;
2696
2697 case PC300GENLOOPDOWN: /* Generate loop DOWN */
2698 if (pc300loop.loop_on) {
2699 falc_generate_loop_down_code (card, ch);
2700 } else {
2701 turn_off_xld(card, ch);
2702 }
2703 return 0;
2704
2705 default:
2706 return -EINVAL;
2707 }
2708 }
2709
2710 case SIOCSPC300PATTERNTEST:
2711 /* Turn the pattern test on/off and show the errors counter */
2712 {
2713 struct pc300patterntst pc300patrntst;
2714
2715 /* TE boards only */
2716 if (card->hw.type != PC300_TE)
2717 return -EINVAL;
2718
2719 if (card->hw.cpld_id < 0x02) {
2720 /* CPLD_ID < 0x02 doesn't support pattern test */
2721 return -EINVAL;
2722 }
2723
2724 if (!arg ||
2725 copy_from_user(&pc300patrntst,arg,sizeof(pc300patterntst_t)))
2726 return -EINVAL;
2727 if (pc300patrntst.patrntst_on == 2) {
2728 if (chan->falc.prbs == 0) {
2729 falc_pattern_test(card, ch, 1);
2730 }
2731 pc300patrntst.num_errors =
2732 falc_pattern_test_error(card, ch);
2733 if (!arg
2734 || copy_to_user(arg, &pc300patrntst,
2735 sizeof (pc300patterntst_t)))
2736 return -EINVAL;
2737 } else {
2738 falc_pattern_test(card, ch, pc300patrntst.patrntst_on);
2739 }
2740 return 0;
2741 }
2742
2743 case SIOCWANDEV:
2744 switch (ifr->ifr_settings.type) {
2745 case IF_GET_IFACE:
2746 {
2747 const size_t size = sizeof(sync_serial_settings);
2748 ifr->ifr_settings.type = conf->media;
2749 if (ifr->ifr_settings.size < size) {
2750 /* data size wanted */
2751 ifr->ifr_settings.size = size;
2752 return -ENOBUFS;
2753 }
2754
2755 if (copy_to_user(settings->ifs_ifsu.sync,
2756 &conf->phys_settings, size)) {
2757 return -EFAULT;
2758 }
2759 return 0;
2760 }
2761
2762 case IF_IFACE_V35:
2763 case IF_IFACE_V24:
2764 case IF_IFACE_X21:
2765 {
2766 const size_t size = sizeof(sync_serial_settings);
2767
2768 if (!capable(CAP_NET_ADMIN)) {
2769 return -EPERM;
2770 }
2771 /* incorrect data len? */
2772 if (ifr->ifr_settings.size != size) {
2773 return -ENOBUFS;
2774 }
2775
2776 if (copy_from_user(&conf->phys_settings,
2777 settings->ifs_ifsu.sync, size)) {
2778 return -EFAULT;
2779 }
2780
2781 if (conf->phys_settings.loopback) {
2782 cpc_writeb(card->hw.scabase + M_REG(MD2, ch),
2783 cpc_readb(card->hw.scabase + M_REG(MD2, ch)) |
2784 MD2_LOOP_MIR);
2785 }
2786 conf->media = ifr->ifr_settings.type;
2787 return 0;
2788 }
2789
2790 case IF_IFACE_T1:
2791 case IF_IFACE_E1:
2792 {
2793 const size_t te_size = sizeof(te1_settings);
2794 const size_t size = sizeof(sync_serial_settings);
2795
2796 if (!capable(CAP_NET_ADMIN)) {
2797 return -EPERM;
2798 }
2799
2800 /* incorrect data len? */
2801 if (ifr->ifr_settings.size != te_size) {
2802 return -ENOBUFS;
2803 }
2804
2805 if (copy_from_user(&conf->phys_settings,
2806 settings->ifs_ifsu.te1, size)) {
2807 return -EFAULT;
2808 }/* Ignoring HDLC slot_map for a while */
2809
2810 if (conf->phys_settings.loopback) {
2811 cpc_writeb(card->hw.scabase + M_REG(MD2, ch),
2812 cpc_readb(card->hw.scabase + M_REG(MD2, ch)) |
2813 MD2_LOOP_MIR);
2814 }
2815 conf->media = ifr->ifr_settings.type;
2816 return 0;
2817 }
2818 default:
2819 return hdlc_ioctl(dev, ifr, cmd);
2820 }
2821
2822 default:
2823 return hdlc_ioctl(dev, ifr, cmd);
2824 }
2825}
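
/*
 * Illustration only: a sketch of how the private ioctls above might be driven
 * from user space.  The interface name, the socket/ifreq plumbing and the
 * user-visible header that exports SIOCGPC300UTILSTATS and pc300stats_t to
 * applications are assumptions of the example, not something this file
 * defines:
 *
 *	pc300stats_t stats;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "hdlc0");
 *	ifr.ifr_data = (char *) &stats;
 *	if (ioctl(fd, SIOCGPC300UTILSTATS, &ifr) == 0)
 *		printf("line_on=%lu line_off=%lu\n",
 *		       (unsigned long) stats.line_on,
 *		       (unsigned long) stats.line_off);
 *
 * Passing a NULL ifr_data instead clears the counters, as the handler above
 * shows.
 */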
2826
2827static struct net_device_stats *cpc_get_stats(struct net_device *dev)
2828{
2829 return hdlc_stats(dev);
2830}
2831
2832static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
2833{
2834 int br, tc;
2835 int br_pwr, error;
2836
2837 if (rate == 0)
2838 return (0);
2839
2840 for (br = 0, br_pwr = 1; br <= 9; br++, br_pwr <<= 1) {
2841 if ((tc = clock / br_pwr / rate) <= 0xff) {
2842 *br_io = br;
2843 break;
2844 }
2845 }
2846
2847 	if (tc > 0 && tc <= 0xff) {
2848 		error = ((rate - (clock / br_pwr / tc)) * 1000) / rate;
2849 /* Errors bigger than +/- 1% won't be tolerated */
2850 if (error < -10 || error > 10)
2851 return (-1);
2852 else
2853 return (tc);
2854 } else {
2855 return (-1);
2856 }
2857}
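
/*
 * Worked example for clock_rate_calc() (the 24.576 MHz base clock is only an
 * assumption for illustration): a request for 64000 bit/s first tries BR=0
 * (prescaler 1), giving TMC = 24576000 / 1 / 64000 = 384, which does not fit
 * in the 8-bit register.  BR=1 (prescaler 2) gives TMC = 192, so *br_io
 * becomes 1 and 192 is returned; the generated rate, 24576000 / 2 / 192, is
 * exactly 64000 bit/s.
 */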
2858
2859int ch_config(pc300dev_t * d)
2860{
2861 pc300ch_t *chan = (pc300ch_t *) d->chan;
2862 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
2863 pc300_t *card = (pc300_t *) chan->card;
2864 void __iomem *scabase = card->hw.scabase;
2865 void __iomem *plxbase = card->hw.plxbase;
2866 int ch = chan->channel;
2867 uclong clkrate = chan->conf.phys_settings.clock_rate;
2868 uclong clktype = chan->conf.phys_settings.clock_type;
2869 ucshort encoding = chan->conf.proto_settings.encoding;
2870 ucshort parity = chan->conf.proto_settings.parity;
2871 int tmc, br;
2872 ucchar md0, md2;
2873
2874 /* Reset the channel */
2875 cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST);
2876
2877 /* Configure the SCA registers */
2878 switch (parity) {
2879 case PARITY_NONE:
2880 md0 = MD0_BIT_SYNC;
2881 break;
2882 case PARITY_CRC16_PR0:
2883 md0 = MD0_CRC16_0|MD0_CRCC0|MD0_BIT_SYNC;
2884 break;
2885 case PARITY_CRC16_PR1:
2886 md0 = MD0_CRC16_1|MD0_CRCC0|MD0_BIT_SYNC;
2887 break;
2888 case PARITY_CRC32_PR1_CCITT:
2889 md0 = MD0_CRC32|MD0_CRCC0|MD0_BIT_SYNC;
2890 break;
2891 case PARITY_CRC16_PR1_CCITT:
2892 default:
2893 md0 = MD0_CRC_CCITT|MD0_CRCC0|MD0_BIT_SYNC;
2894 break;
2895 }
2896 switch (encoding) {
2897 case ENCODING_NRZI:
2898 md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZI;
2899 break;
2900 case ENCODING_FM_MARK: /* FM1 */
2901 md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM1;
2902 break;
2903 case ENCODING_FM_SPACE: /* FM0 */
2904 md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_FM0;
2905 break;
2906 case ENCODING_MANCHESTER: /* It's not working... */
2907 md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_FM|MD2_MANCH;
2908 break;
2909 case ENCODING_NRZ:
2910 default:
2911 md2 = MD2_F_DUPLEX|MD2_ADPLL_X8|MD2_NRZ;
2912 break;
2913 }
2914 cpc_writeb(scabase + M_REG(MD0, ch), md0);
2915 cpc_writeb(scabase + M_REG(MD1, ch), 0);
2916 cpc_writeb(scabase + M_REG(MD2, ch), md2);
2917 cpc_writeb(scabase + M_REG(IDL, ch), 0x7e);
2918 cpc_writeb(scabase + M_REG(CTL, ch), CTL_URSKP | CTL_IDLC);
2919
2920 /* Configure HW media */
2921 switch (card->hw.type) {
2922 case PC300_RSV:
2923 if (conf->media == IF_IFACE_V35) {
2924 cpc_writel((plxbase + card->hw.gpioc_reg),
2925 cpc_readl(plxbase + card->hw.gpioc_reg) | PC300_CHMEDIA_MASK(ch));
2926 } else {
2927 cpc_writel((plxbase + card->hw.gpioc_reg),
2928 cpc_readl(plxbase + card->hw.gpioc_reg) & ~PC300_CHMEDIA_MASK(ch));
2929 }
2930 break;
2931
2932 case PC300_X21:
2933 break;
2934
2935 case PC300_TE:
2936 te_config(card, ch);
2937 break;
2938 }
2939
2940 switch (card->hw.type) {
2941 case PC300_RSV:
2942 case PC300_X21:
2943 if (clktype == CLOCK_INT || clktype == CLOCK_TXINT) {
2944 /* Calculate the clkrate parameters */
2945 tmc = clock_rate_calc(clkrate, card->hw.clock, &br);
2946 cpc_writeb(scabase + M_REG(TMCT, ch), tmc);
2947 cpc_writeb(scabase + M_REG(TXS, ch),
2948 (TXS_DTRXC | TXS_IBRG | br));
2949 if (clktype == CLOCK_INT) {
2950 cpc_writeb(scabase + M_REG(TMCR, ch), tmc);
2951 cpc_writeb(scabase + M_REG(RXS, ch),
2952 (RXS_IBRG | br));
2953 } else {
2954 cpc_writeb(scabase + M_REG(TMCR, ch), 1);
2955 cpc_writeb(scabase + M_REG(RXS, ch), 0);
2956 }
2957 if (card->hw.type == PC300_X21) {
2958 cpc_writeb(scabase + M_REG(GPO, ch), 1);
2959 cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1);
2960 } else {
2961 cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1);
2962 }
2963 } else {
2964 cpc_writeb(scabase + M_REG(TMCT, ch), 1);
2965 if (clktype == CLOCK_EXT) {
2966 cpc_writeb(scabase + M_REG(TXS, ch),
2967 TXS_DTRXC);
2968 } else {
2969 cpc_writeb(scabase + M_REG(TXS, ch),
2970 TXS_DTRXC|TXS_RCLK);
2971 }
2972 cpc_writeb(scabase + M_REG(TMCR, ch), 1);
2973 cpc_writeb(scabase + M_REG(RXS, ch), 0);
2974 if (card->hw.type == PC300_X21) {
2975 cpc_writeb(scabase + M_REG(GPO, ch), 0);
2976 cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1 | EXS_RES1);
2977 } else {
2978 cpc_writeb(scabase + M_REG(EXS, ch), EXS_TES1);
2979 }
2980 }
2981 break;
2982
2983 case PC300_TE:
2984 /* SCA always receives clock from the FALC chip */
2985 cpc_writeb(scabase + M_REG(TMCT, ch), 1);
2986 cpc_writeb(scabase + M_REG(TXS, ch), 0);
2987 cpc_writeb(scabase + M_REG(TMCR, ch), 1);
2988 cpc_writeb(scabase + M_REG(RXS, ch), 0);
2989 cpc_writeb(scabase + M_REG(EXS, ch), 0);
2990 break;
2991 }
2992
2993 /* Enable Interrupts */
2994 cpc_writel(scabase + IER0,
2995 cpc_readl(scabase + IER0) |
2996 IR0_M(IR0_RXINTA, ch) |
2997 IR0_DRX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch) |
2998 IR0_DTX(IR0_EFT | IR0_DMIA | IR0_DMIB, ch));
2999 cpc_writeb(scabase + M_REG(IE0, ch),
3000 cpc_readl(scabase + M_REG(IE0, ch)) | IE0_RXINTA);
3001 cpc_writeb(scabase + M_REG(IE1, ch),
3002 cpc_readl(scabase + M_REG(IE1, ch)) | IE1_CDCD);
3003
3004 return 0;
3005}
3006
3007int rx_config(pc300dev_t * d)
3008{
3009 pc300ch_t *chan = (pc300ch_t *) d->chan;
3010 pc300_t *card = (pc300_t *) chan->card;
3011 void __iomem *scabase = card->hw.scabase;
3012 int ch = chan->channel;
3013
3014 cpc_writeb(scabase + DSR_RX(ch), 0);
3015
3016 /* General RX settings */
3017 cpc_writeb(scabase + M_REG(RRC, ch), 0);
3018 cpc_writeb(scabase + M_REG(RNR, ch), 16);
3019
3020 /* Enable reception */
3021 cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_CRC_INIT);
3022 cpc_writeb(scabase + M_REG(CMD, ch), CMD_RX_ENA);
3023
3024 /* Initialize DMA stuff */
3025 chan->rx_first_bd = 0;
3026 chan->rx_last_bd = N_DMA_RX_BUF - 1;
3027 rx_dma_buf_init(card, ch);
3028 cpc_writeb(scabase + DCR_RX(ch), DCR_FCT_CLR);
3029 cpc_writeb(scabase + DMR_RX(ch), (DMR_TMOD | DMR_NF));
3030 cpc_writeb(scabase + DIR_RX(ch), (DIR_EOM | DIR_BOF));
3031
3032 /* Start DMA */
3033 rx_dma_start(card, ch);
3034
3035 return 0;
3036}
3037
3038int tx_config(pc300dev_t * d)
3039{
3040 pc300ch_t *chan = (pc300ch_t *) d->chan;
3041 pc300_t *card = (pc300_t *) chan->card;
3042 void __iomem *scabase = card->hw.scabase;
3043 int ch = chan->channel;
3044
3045 cpc_writeb(scabase + DSR_TX(ch), 0);
3046
3047 /* General TX settings */
3048 cpc_writeb(scabase + M_REG(TRC0, ch), 0);
3049 cpc_writeb(scabase + M_REG(TFS, ch), 32);
3050 cpc_writeb(scabase + M_REG(TNR0, ch), 20);
3051 cpc_writeb(scabase + M_REG(TNR1, ch), 48);
3052 cpc_writeb(scabase + M_REG(TCR, ch), 8);
3053
3054 /* Enable transmission */
3055 cpc_writeb(scabase + M_REG(CMD, ch), CMD_TX_CRC_INIT);
3056
3057 /* Initialize DMA stuff */
3058 chan->tx_first_bd = 0;
3059 chan->tx_next_bd = 0;
3060 tx_dma_buf_init(card, ch);
3061 cpc_writeb(scabase + DCR_TX(ch), DCR_FCT_CLR);
3062 cpc_writeb(scabase + DMR_TX(ch), (DMR_TMOD | DMR_NF));
3063 cpc_writeb(scabase + DIR_TX(ch), (DIR_EOM | DIR_BOF | DIR_UDRF));
3064 cpc_writel(scabase + DTX_REG(CDAL, ch), TX_BD_ADDR(ch, chan->tx_first_bd));
3065 cpc_writel(scabase + DTX_REG(EDAL, ch), TX_BD_ADDR(ch, chan->tx_next_bd));
3066
3067 return 0;
3068}
3069
3070static int cpc_attach(struct net_device *dev, unsigned short encoding,
3071 unsigned short parity)
3072{
3073 pc300dev_t *d = (pc300dev_t *)dev->priv;
3074 pc300ch_t *chan = (pc300ch_t *)d->chan;
3075 pc300_t *card = (pc300_t *)chan->card;
3076 pc300chconf_t *conf = (pc300chconf_t *)&chan->conf;
3077
3078 if (card->hw.type == PC300_TE) {
3079 if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) {
3080 return -EINVAL;
3081 }
3082 } else {
3083 if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI &&
3084 encoding != ENCODING_FM_MARK && encoding != ENCODING_FM_SPACE) {
3085 /* Driver doesn't support ENCODING_MANCHESTER yet */
3086 return -EINVAL;
3087 }
3088 }
3089
3090 if (parity != PARITY_NONE && parity != PARITY_CRC16_PR0 &&
3091 parity != PARITY_CRC16_PR1 && parity != PARITY_CRC32_PR1_CCITT &&
3092 parity != PARITY_CRC16_PR1_CCITT) {
3093 return -EINVAL;
3094 }
3095
3096 conf->proto_settings.encoding = encoding;
3097 conf->proto_settings.parity = parity;
3098 return 0;
3099}
3100
3101void cpc_opench(pc300dev_t * d)
3102{
3103 pc300ch_t *chan = (pc300ch_t *) d->chan;
3104 pc300_t *card = (pc300_t *) chan->card;
3105 int ch = chan->channel;
3106 void __iomem *scabase = card->hw.scabase;
3107
3108 ch_config(d);
3109
3110 rx_config(d);
3111
3112 tx_config(d);
3113
3114 /* Assert RTS and DTR */
3115 cpc_writeb(scabase + M_REG(CTL, ch),
3116 cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR));
3117}
3118
3119void cpc_closech(pc300dev_t * d)
3120{
3121 pc300ch_t *chan = (pc300ch_t *) d->chan;
3122 pc300_t *card = (pc300_t *) chan->card;
3123 falc_t *pfalc = (falc_t *) & chan->falc;
3124 int ch = chan->channel;
3125
3126 cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_CH_RST);
3127 rx_dma_stop(card, ch);
3128 tx_dma_stop(card, ch);
3129
3130 if (card->hw.type == PC300_TE) {
3131 memset(pfalc, 0, sizeof(falc_t));
3132 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
3133 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
3134 ~((CPLD_REG2_FALC_TX_CLK | CPLD_REG2_FALC_RX_CLK |
3135 CPLD_REG2_FALC_LED2) << (2 * ch)));
3136 /* Reset the FALC chip */
3137 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
3138 cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
3139 (CPLD_REG1_FALC_RESET << (2 * ch)));
3140 udelay(10000);
3141 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
3142 cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) &
3143 ~(CPLD_REG1_FALC_RESET << (2 * ch)));
3144 }
3145}
3146
3147int cpc_open(struct net_device *dev)
3148{
3149 hdlc_device *hdlc = dev_to_hdlc(dev);
3150 pc300dev_t *d = (pc300dev_t *) dev->priv;
3151 struct ifreq ifr;
3152 int result;
3153
3154#ifdef PC300_DEBUG_OTHER
3155 printk("pc300: cpc_open");
3156#endif
3157
3158 if (hdlc->proto.id == IF_PROTO_PPP) {
3159 d->if_ptr = &hdlc->state.ppp.pppdev;
3160 }
3161
3162 result = hdlc_open(dev);
3163 if (hdlc->proto.id == IF_PROTO_PPP) {
3164 dev->priv = d;
3165 }
3166 if (result) {
3167 return result;
3168 }
3169
3170 sprintf(ifr.ifr_name, "%s", dev->name);
3171 cpc_opench(d);
3172 netif_start_queue(dev);
3173 return 0;
3174}
3175
3176int cpc_close(struct net_device *dev)
3177{
3178 hdlc_device *hdlc = dev_to_hdlc(dev);
3179 pc300dev_t *d = (pc300dev_t *) dev->priv;
3180 pc300ch_t *chan = (pc300ch_t *) d->chan;
3181 pc300_t *card = (pc300_t *) chan->card;
3182 unsigned long flags;
3183
3184#ifdef PC300_DEBUG_OTHER
3185 printk("pc300: cpc_close");
3186#endif
3187
3188 netif_stop_queue(dev);
3189
3190 CPC_LOCK(card, flags);
3191 cpc_closech(d);
3192 CPC_UNLOCK(card, flags);
3193
3194 hdlc_close(dev);
3195 if (hdlc->proto.id == IF_PROTO_PPP) {
3196 d->if_ptr = NULL;
3197 }
3198#ifdef CONFIG_PC300_MLPPP
3199 if (chan->conf.proto == PC300_PROTO_MLPPP) {
3200 cpc_tty_unregister_service(d);
3201 chan->conf.proto = 0xffff;
3202 }
3203#endif
3204
3205 return 0;
3206}
3207
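/*
 * Probe the on-board RAM: write a rolling byte pattern and stop at the
 * first location that does not read back; the index reached is the
 * amount of usable RAM.
 */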
3208static uclong detect_ram(pc300_t * card)
3209{
3210 uclong i;
3211 ucchar data;
3212 void __iomem *rambase = card->hw.rambase;
3213
3214 card->hw.ramsize = PC300_RAMSIZE;
3215 /* Let's find out how much RAM is present on this board */
3216 for (i = 0; i < card->hw.ramsize; i++) {
3217 data = (ucchar) (i & 0xff);
3218 cpc_writeb(rambase + i, data);
3219 if (cpc_readb(rambase + i) != data) {
3220 break;
3221 }
3222 }
3223 return (i);
3224}
3225
3226static void plx_init(pc300_t * card)
3227{
3228 struct RUNTIME_9050 __iomem *plx_ctl = card->hw.plxbase;
3229
3230 /* Reset PLX */
3231 cpc_writel(&plx_ctl->init_ctrl,
3232 cpc_readl(&plx_ctl->init_ctrl) | 0x40000000);
3233 udelay(10000L);
3234 cpc_writel(&plx_ctl->init_ctrl,
3235 cpc_readl(&plx_ctl->init_ctrl) & ~0x40000000);
3236
3237 /* Reload Config. Registers from EEPROM */
3238 cpc_writel(&plx_ctl->init_ctrl,
3239 cpc_readl(&plx_ctl->init_ctrl) | 0x20000000);
3240 udelay(10000L);
3241 cpc_writel(&plx_ctl->init_ctrl,
3242 cpc_readl(&plx_ctl->init_ctrl) & ~0x20000000);
3243
3244}
3245
3246static inline void show_version(void)
3247{
3248 char *rcsvers, *rcsdate, *tmp;
3249
3250 rcsvers = strchr(rcsid, ' ');
3251 rcsvers++;
3252 tmp = strchr(rcsvers, ' ');
3253 *tmp++ = '\0';
3254 rcsdate = strchr(tmp, ' ');
3255 rcsdate++;
3256 tmp = strrchr(rcsdate, ' ');
3257 *tmp = '\0';
3258 printk(KERN_INFO "Cyclades-PC300 driver %s %s (built %s %s)\n",
3259 rcsvers, rcsdate, __DATE__, __TIME__);
3260} /* show_version */
3261
3262static void cpc_init_card(pc300_t * card)
3263{
3264 int i, devcount = 0;
3265 static int board_nbr = 1;
3266
3267 /* Enable interrupts on the PCI bridge */
3268 plx_init(card);
3269 cpc_writew(card->hw.plxbase + card->hw.intctl_reg,
3270 cpc_readw(card->hw.plxbase + card->hw.intctl_reg) | 0x0040);
3271
3272#ifdef USE_PCI_CLOCK
3273 /* Set board clock to PCI clock */
3274 cpc_writel(card->hw.plxbase + card->hw.gpioc_reg,
3275 cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) | 0x00000004UL);
3276 card->hw.clock = PC300_PCI_CLOCK;
3277#else
3278 /* Set board clock to internal oscillator clock */
3279 cpc_writel(card->hw.plxbase + card->hw.gpioc_reg,
3280 cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & ~0x00000004UL);
3281 card->hw.clock = PC300_OSC_CLOCK;
3282#endif
3283
3284 /* Detect actual on-board RAM size */
3285 card->hw.ramsize = detect_ram(card);
3286
3287 /* Set Global SCA-II registers */
3288 cpc_writeb(card->hw.scabase + PCR, PCR_PR2);
3289 cpc_writeb(card->hw.scabase + BTCR, 0x10);
3290 cpc_writeb(card->hw.scabase + WCRL, 0);
3291 cpc_writeb(card->hw.scabase + DMER, 0x80);
3292
3293 if (card->hw.type == PC300_TE) {
3294 ucchar reg1;
3295
3296 /* Check CPLD version */
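		/* The new CPLD ignores writes to CPLD_REG1, so if the value
		 * written below does not stick, the board carries the new
		 * CPLD and exposes its ID register. */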
3297 reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1);
3298 cpc_writeb(card->hw.falcbase + CPLD_REG1, (reg1 + 0x5a));
3299 if (cpc_readb(card->hw.falcbase + CPLD_REG1) == reg1) {
3300 /* New CPLD */
3301 card->hw.cpld_id = cpc_readb(card->hw.falcbase + CPLD_ID_REG);
3302 card->hw.cpld_reg1 = CPLD_V2_REG1;
3303 card->hw.cpld_reg2 = CPLD_V2_REG2;
3304 } else {
3305 /* old CPLD */
3306 card->hw.cpld_id = 0;
3307 card->hw.cpld_reg1 = CPLD_REG1;
3308 card->hw.cpld_reg2 = CPLD_REG2;
3309 cpc_writeb(card->hw.falcbase + CPLD_REG1, reg1);
3310 }
3311
3312 /* Enable the board's global clock */
3313 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg1,
3314 cpc_readb(card->hw.falcbase + card->hw.cpld_reg1) |
3315 CPLD_REG1_GLOBAL_CLK);
3316
3317 }
3318
3319 for (i = 0; i < card->hw.nchan; i++) {
3320 pc300ch_t *chan = &card->chan[i];
3321 pc300dev_t *d = &chan->d;
3322 hdlc_device *hdlc;
3323 struct net_device *dev;
3324
3325 chan->card = card;
3326 chan->channel = i;
3327 chan->conf.phys_settings.clock_rate = 0;
3328 chan->conf.phys_settings.clock_type = CLOCK_EXT;
3329 chan->conf.proto_settings.encoding = ENCODING_NRZ;
3330 chan->conf.proto_settings.parity = PARITY_CRC16_PR1_CCITT;
3331 switch (card->hw.type) {
3332 case PC300_TE:
3333 chan->conf.media = IF_IFACE_T1;
3334 chan->conf.lcode = PC300_LC_B8ZS;
3335 chan->conf.fr_mode = PC300_FR_ESF;
3336 chan->conf.lbo = PC300_LBO_0_DB;
3337 chan->conf.rx_sens = PC300_RX_SENS_SH;
3338 chan->conf.tslot_bitmap = 0xffffffffUL;
3339 break;
3340
3341 case PC300_X21:
3342 chan->conf.media = IF_IFACE_X21;
3343 break;
3344
3345 case PC300_RSV:
3346 default:
3347 chan->conf.media = IF_IFACE_V35;
3348 break;
3349 }
3350 chan->conf.proto = IF_PROTO_PPP;
3351 chan->tx_first_bd = 0;
3352 chan->tx_next_bd = 0;
3353 chan->rx_first_bd = 0;
3354 chan->rx_last_bd = N_DMA_RX_BUF - 1;
3355 chan->nfree_tx_bd = N_DMA_TX_BUF;
3356
3357 d->chan = chan;
3358 d->tx_skb = NULL;
3359 d->trace_on = 0;
3360 d->line_on = 0;
3361 d->line_off = 0;
3362
3363 dev = alloc_hdlcdev(NULL);
3364 if (dev == NULL)
3365 continue;
3366
3367 hdlc = dev_to_hdlc(dev);
3368 hdlc->xmit = cpc_queue_xmit;
3369 hdlc->attach = cpc_attach;
3370 d->dev = dev;
3371 dev->mem_start = card->hw.ramphys;
3372 dev->mem_end = card->hw.ramphys + card->hw.ramsize - 1;
3373 dev->irq = card->hw.irq;
3374 dev->init = NULL;
3375 dev->tx_queue_len = PC300_TX_QUEUE_LEN;
3376 dev->mtu = PC300_DEF_MTU;
3377
3378 dev->open = cpc_open;
3379 dev->stop = cpc_close;
3380 dev->tx_timeout = cpc_tx_timeout;
3381 dev->watchdog_timeo = PC300_TX_TIMEOUT;
3382 dev->get_stats = cpc_get_stats;
3383 dev->set_multicast_list = NULL;
3384 dev->set_mac_address = NULL;
3385 dev->change_mtu = cpc_change_mtu;
3386 dev->do_ioctl = cpc_ioctl;
3387
3388 if (register_hdlc_device(dev) == 0) {
3389 dev->priv = d; /* We need 'priv', hdlc doesn't */
3390 printk("%s: Cyclades-PC300/", dev->name);
3391 switch (card->hw.type) {
3392 case PC300_TE:
3393 if (card->hw.bus == PC300_PMC) {
3394 printk("TE-M");
3395 } else {
3396 printk("TE ");
3397 }
3398 break;
3399
3400 case PC300_X21:
3401 printk("X21 ");
3402 break;
3403
3404 case PC300_RSV:
3405 default:
3406 printk("RSV ");
3407 break;
3408 }
3409 printk (" #%d, %dKB of RAM at 0x%08x, IRQ%d, channel %d.\n",
3410 board_nbr, card->hw.ramsize / 1024,
3411 card->hw.ramphys, card->hw.irq, i + 1);
3412 devcount++;
3413 } else {
3414 printk ("Dev%d on card(0x%08x): unable to allocate i/f name.\n",
3415 i + 1, card->hw.ramphys);
3416 free_netdev(dev);
3417 continue;
3418 }
3419 }
3420 spin_lock_init(&card->card_lock);
3421
3422 board_nbr++;
3423}
3424
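/*
 * PCI probe: allocate the card structure, map the PLX, SCA, on-board RAM
 * and (on TE boards) FALC windows, request the IRQ and register one hdlc
 * device per channel.
 */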
3425static int __devinit
3426cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3427{
3428 static int first_time = 1;
3429 ucchar cpc_rev_id;
3430 int err = 0, eeprom_outdated = 0;
3431 ucshort device_id;
3432 pc300_t *card;
3433
3434 if (first_time) {
3435 first_time = 0;
3436 show_version();
3437#ifdef CONFIG_PC300_MLPPP
3438 cpc_tty_reset_var();
3439#endif
3440 }
3441
3442 card = (pc300_t *) kmalloc(sizeof(pc300_t), GFP_KERNEL);
3443 if (card == NULL) {
3444 printk("PC300 found at RAM 0x%08lx, "
3445 "but could not allocate card structure.\n",
3446 pci_resource_start(pdev, 3));
3447 return -ENOMEM;
3448 }
3449 memset(card, 0, sizeof(pc300_t));
3450
3451 /* read PCI configuration area */
3452 device_id = ent->device;
3453 card->hw.irq = pdev->irq;
3454 card->hw.iophys = pci_resource_start(pdev, 1);
3455 card->hw.iosize = pci_resource_len(pdev, 1);
3456 card->hw.scaphys = pci_resource_start(pdev, 2);
3457 card->hw.scasize = pci_resource_len(pdev, 2);
3458 card->hw.ramphys = pci_resource_start(pdev, 3);
3459 card->hw.alloc_ramsize = pci_resource_len(pdev, 3);
3460 card->hw.falcphys = pci_resource_start(pdev, 4);
3461 card->hw.falcsize = pci_resource_len(pdev, 4);
3462 card->hw.plxphys = pci_resource_start(pdev, 5);
3463 card->hw.plxsize = pci_resource_len(pdev, 5);
3464 pci_read_config_byte(pdev, PCI_REVISION_ID, &cpc_rev_id);
3465
3466 switch (device_id) {
3467 case PCI_DEVICE_ID_PC300_RX_1:
3468 case PCI_DEVICE_ID_PC300_TE_1:
3469 case PCI_DEVICE_ID_PC300_TE_M_1:
3470 card->hw.nchan = 1;
3471 break;
3472
3473 case PCI_DEVICE_ID_PC300_RX_2:
3474 case PCI_DEVICE_ID_PC300_TE_2:
3475 case PCI_DEVICE_ID_PC300_TE_M_2:
3476 default:
3477 card->hw.nchan = PC300_MAXCHAN;
3478 break;
3479 }
3480#ifdef PC300_DEBUG_PCI
3481 printk("cpc (bus=0x0%x,pci_id=0x%x,", pdev->bus->number, pdev->devfn);
3482 printk("rev_id=%d) IRQ%d\n", cpc_rev_id, card->hw.irq);
3483 printk("cpc:found ramaddr=0x%08lx plxaddr=0x%08lx "
3484 "ctladdr=0x%08lx falcaddr=0x%08lx\n",
3485 card->hw.ramphys, card->hw.plxphys, card->hw.scaphys,
3486 card->hw.falcphys);
3487#endif
3488 /* Although we don't use this I/O region, we should
3489 * request it from the kernel anyway, to avoid problems
3490 * with other drivers accessing it. */
3491 if (!request_region(card->hw.iophys, card->hw.iosize, "PLX Registers")) {
3492 /* In case we can't allocate it, warn user */
3493 printk("WARNING: couldn't allocate I/O region for PC300 board "
3494 "at 0x%08x!\n", card->hw.ramphys);
3495 }
3496
3497 if (card->hw.plxphys) {
3498 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, card->hw.plxphys);
3499 } else {
3500 eeprom_outdated = 1;
3501 card->hw.plxphys = pci_resource_start(pdev, 0);
3502 card->hw.plxsize = pci_resource_len(pdev, 0);
3503 }
3504
3505 if (!request_mem_region(card->hw.plxphys, card->hw.plxsize,
3506 "PLX Registers")) {
3507 printk("PC300 found at RAM 0x%08x, "
3508 "but could not allocate PLX mem region.\n",
3509 card->hw.ramphys);
3510 err = -ENODEV;
3511 goto err_release_io;
3512 }
3513 if (!request_mem_region(card->hw.ramphys, card->hw.alloc_ramsize,
3514 "On-board RAM")) {
3515 printk("PC300 found at RAM 0x%08x, "
3516 "but could not allocate RAM mem region.\n",
3517 card->hw.ramphys);
3518 err = -ENODEV;
3519 goto err_release_plx;
3520 }
3521 if (!request_mem_region(card->hw.scaphys, card->hw.scasize,
3522 "SCA-II Registers")) {
3523 printk("PC300 found at RAM 0x%08x, "
3524 "but could not allocate SCA mem region.\n",
3525 card->hw.ramphys);
3526 err = -ENODEV;
3527 goto err_release_ram;
3528 }
3529
3530 if ((err = pci_enable_device(pdev)) != 0)
3531 goto err_release_sca;
3532
3533 card->hw.plxbase = ioremap(card->hw.plxphys, card->hw.plxsize);
3534 card->hw.rambase = ioremap(card->hw.ramphys, card->hw.alloc_ramsize);
3535 card->hw.scabase = ioremap(card->hw.scaphys, card->hw.scasize);
3536 switch (device_id) {
3537 case PCI_DEVICE_ID_PC300_TE_1:
3538 case PCI_DEVICE_ID_PC300_TE_2:
3539 case PCI_DEVICE_ID_PC300_TE_M_1:
3540 case PCI_DEVICE_ID_PC300_TE_M_2:
3541 request_mem_region(card->hw.falcphys, card->hw.falcsize,
3542 "FALC Registers");
3543 card->hw.falcbase = ioremap(card->hw.falcphys, card->hw.falcsize);
3544 break;
3545
3546 case PCI_DEVICE_ID_PC300_RX_1:
3547 case PCI_DEVICE_ID_PC300_RX_2:
3548 default:
3549 card->hw.falcbase = NULL;
3550 break;
3551 }
3552
3553#ifdef PC300_DEBUG_PCI
3554 printk("cpc: relocate ramaddr=0x%08lx plxaddr=0x%08lx "
3555 "ctladdr=0x%08lx falcaddr=0x%08lx\n",
3556 card->hw.rambase, card->hw.plxbase, card->hw.scabase,
3557 card->hw.falcbase);
3558#endif
3559
3560 /* Set PCI drv pointer to the card structure */
3561 pci_set_drvdata(pdev, card);
3562
3563 /* Set board type */
3564 switch (device_id) {
3565 case PCI_DEVICE_ID_PC300_TE_1:
3566 case PCI_DEVICE_ID_PC300_TE_2:
3567 case PCI_DEVICE_ID_PC300_TE_M_1:
3568 case PCI_DEVICE_ID_PC300_TE_M_2:
3569 card->hw.type = PC300_TE;
3570
3571 if ((device_id == PCI_DEVICE_ID_PC300_TE_M_1) ||
3572 (device_id == PCI_DEVICE_ID_PC300_TE_M_2)) {
3573 card->hw.bus = PC300_PMC;
3574 /* Set PLX register offsets */
3575 card->hw.gpioc_reg = 0x54;
3576 card->hw.intctl_reg = 0x4c;
3577 } else {
3578 card->hw.bus = PC300_PCI;
3579 /* Set PLX register offsets */
3580 card->hw.gpioc_reg = 0x50;
3581 card->hw.intctl_reg = 0x4c;
3582 }
3583 break;
3584
3585 case PCI_DEVICE_ID_PC300_RX_1:
3586 case PCI_DEVICE_ID_PC300_RX_2:
3587 default:
3588 card->hw.bus = PC300_PCI;
3589 /* Set PLX register offsets */
3590 card->hw.gpioc_reg = 0x50;
3591 card->hw.intctl_reg = 0x4c;
3592
3593 if ((cpc_readl(card->hw.plxbase + card->hw.gpioc_reg) & PC300_CTYPE_MASK)) {
3594 card->hw.type = PC300_X21;
3595 } else {
3596 card->hw.type = PC300_RSV;
3597 }
3598 break;
3599 }
3600
3601 /* Allocate IRQ */
3602 if (request_irq(card->hw.irq, cpc_intr, SA_SHIRQ, "Cyclades-PC300", card)) {
3603 printk ("PC300 found at RAM 0x%08x, but could not allocate IRQ%d.\n",
3604 card->hw.ramphys, card->hw.irq);
3605 goto err_io_unmap;
3606 }
3607
3608 cpc_init_card(card);
3609
3610 if (eeprom_outdated)
3611 printk("WARNING: PC300 with outdated EEPROM.\n");
3612 return 0;
3613
3614err_io_unmap:
3615 iounmap(card->hw.plxbase);
3616 iounmap(card->hw.scabase);
3617 iounmap(card->hw.rambase);
3618 if (card->hw.type == PC300_TE) {
3619 iounmap(card->hw.falcbase);
3620 release_mem_region(card->hw.falcphys, card->hw.falcsize);
3621 }
3622err_release_sca:
3623 release_mem_region(card->hw.scaphys, card->hw.scasize);
3624err_release_ram:
3625 release_mem_region(card->hw.ramphys, card->hw.alloc_ramsize);
3626err_release_plx:
3627 release_mem_region(card->hw.plxphys, card->hw.plxsize);
3628err_release_io:
3629 release_region(card->hw.iophys, card->hw.iosize);
3630 kfree(card);
3631 return -ENODEV;
3632}
3633
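/*
 * PCI remove: disable interrupts on the PCI bridge, unregister the
 * per-channel hdlc devices and release the mapped windows, memory
 * regions, IRQ and card structure.
 */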
3634static void __devexit cpc_remove_one(struct pci_dev *pdev)
3635{
3636 pc300_t *card = pci_get_drvdata(pdev);
3637
3638 if (card->hw.rambase != 0) {
3639 int i;
3640
3641 /* Disable interrupts on the PCI bridge */
3642 cpc_writew(card->hw.plxbase + card->hw.intctl_reg,
3643 cpc_readw(card->hw.plxbase + card->hw.intctl_reg) & ~(0x0040));
3644
3645 for (i = 0; i < card->hw.nchan; i++) {
3646 unregister_hdlc_device(card->chan[i].d.dev);
3647 }
3648 iounmap(card->hw.plxbase);
3649 iounmap(card->hw.scabase);
3650 iounmap(card->hw.rambase);
3651 release_mem_region(card->hw.plxphys, card->hw.plxsize);
3652 release_mem_region(card->hw.ramphys, card->hw.alloc_ramsize);
3653 release_mem_region(card->hw.scaphys, card->hw.scasize);
3654 release_region(card->hw.iophys, card->hw.iosize);
3655 if (card->hw.type == PC300_TE) {
3656 iounmap(card->hw.falcbase);
3657 release_mem_region(card->hw.falcphys, card->hw.falcsize);
3658 }
3659 for (i = 0; i < card->hw.nchan; i++)
3660 if (card->chan[i].d.dev)
3661 free_netdev(card->chan[i].d.dev);
3662 if (card->hw.irq)
3663 free_irq(card->hw.irq, card);
3664 kfree(card);
3665 }
3666}
3667
3668static struct pci_driver cpc_driver = {
3669 .name = "pc300",
3670 .id_table = cpc_pci_dev_id,
3671 .probe = cpc_init_one,
3672 .remove = __devexit_p(cpc_remove_one),
3673};
3674
3675static int __init cpc_init(void)
3676{
3677 return pci_module_init(&cpc_driver);
3678}
3679
3680static void __exit cpc_cleanup_module(void)
3681{
3682 pci_unregister_driver(&cpc_driver);
3683}
3684
3685module_init(cpc_init);
3686module_exit(cpc_cleanup_module);
3687
3688MODULE_DESCRIPTION("Cyclades-PC300 cards driver");
3689MODULE_AUTHOR( "Author: Ivan Passos <ivan@cyclades.com>\r\n"
 3690	       "Maintainer: PC300 Maintainer <pc300@cyclades.com>");
3691MODULE_LICENSE("GPL");
3692
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
new file mode 100644
index 000000000000..29f84ad08730
--- /dev/null
+++ b/drivers/net/wan/pc300_tty.c
@@ -0,0 +1,1095 @@
1/*
2 * pc300_tty.c Cyclades-PC300(tm) TTY Driver.
3 *
4 * Author: Regina Kodato <reginak@cyclades.com>
5 *
6 * Copyright: (c) 1999-2002 Cyclades Corp.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * $Log: pc300_tty.c,v $
14 * Revision 3.7 2002/03/07 14:17:09 henrique
15 * License data fixed
16 *
17 * Revision 3.6 2001/12/10 12:29:42 regina
18 * Fix the MLPPP bug
19 *
20 * Revision 3.5 2001/10/31 11:20:05 regina
21 * automatic pppd starts
22 *
23 * Revision 3.4 2001/08/06 12:01:51 regina
24 * problem in DSR_DE bit
25 *
26 * Revision 3.3 2001/07/26 22:58:41 regina
27 * update EDA value
28 *
29 * Revision 3.2 2001/07/12 13:11:20 regina
30 * bug fix - DCD-OFF in pc300 tty driver
31 *
32 * DMA transmission bug fix
33 *
34 * Revision 3.1 2001/06/22 13:13:02 regina
35 * MLPPP implementation
36 *
37 */
38
39#include <linux/module.h>
40#include <linux/kernel.h>
41#include <linux/pci.h>
42#include <linux/errno.h>
43#include <linux/string.h>
44#include <linux/init.h>
45#include <linux/netdevice.h>
46#include <linux/spinlock.h>
47#include <linux/slab.h>
48#include <linux/if.h>
49#include <linux/skbuff.h>
50/* TTY includes */
51#include <linux/tty.h>
52#include <linux/tty_flip.h>
53#include <linux/serial.h>
54
55#include <asm/io.h>
56#include <asm/uaccess.h>
57
58#include "pc300.h"
59
60/* defines and macros */
61/* TTY Global definitions */
62#define CPC_TTY_NPORTS 8 /* maximum number of the sync tty connections */
63#define CPC_TTY_MAJOR CYCLADES_MAJOR
64#define CPC_TTY_MINOR_START 240 /* minor of the first PC300 interface */
65
66#define CPC_TTY_MAX_MTU 2000
67
68/* tty interface state */
69#define CPC_TTY_ST_IDLE 0
70#define CPC_TTY_ST_INIT 1 /* configured with MLPPP and up */
71#define CPC_TTY_ST_OPEN 2 /* opened by application */
72
73#define CPC_TTY_LOCK(card,flags)\
74 do {\
75 spin_lock_irqsave(&card->card_lock, flags); \
76 } while (0)
77
78#define CPC_TTY_UNLOCK(card,flags) \
79 do {\
80 spin_unlock_irqrestore(&card->card_lock, flags); \
81 } while (0)
82
83//#define CPC_TTY_DBG(format,a...) printk(format,##a)
84#define CPC_TTY_DBG(format,a...)
85
86/* data structures */
87typedef struct _st_cpc_rx_buf {
88 struct _st_cpc_rx_buf *next;
89 int size;
90 unsigned char data[1];
91} st_cpc_rx_buf;
92
93struct st_cpc_rx_list {
94 st_cpc_rx_buf *first;
95 st_cpc_rx_buf *last;
96};
97
98typedef struct _st_cpc_tty_area {
99 int state; /* state of the TTY interface */
100 int num_open;
101 unsigned int tty_minor; /* minor this interface */
102 volatile struct st_cpc_rx_list buf_rx; /* ptr. to reception buffer */
103 unsigned char* buf_tx; /* ptr. to transmission buffer */
104 pc300dev_t* pc300dev; /* ptr. to info struct in PC300 driver */
105 unsigned char name[20]; /* interf. name + "-tty" */
106 struct tty_struct *tty;
107 struct work_struct tty_tx_work; /* tx work - tx interrupt */
108 struct work_struct tty_rx_work; /* rx work - rx interrupt */
109 } st_cpc_tty_area;
110
111/* TTY data structures */
112static struct tty_driver serial_drv;
113
114/* local variables */
115st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS];
116
 117int cpc_tty_cnt=0;	/* number of interfaces configured with MLPPP */
118int cpc_tty_unreg_flag = 0;
119
120/* TTY functions prototype */
121static int cpc_tty_open(struct tty_struct *tty, struct file *flip);
122static void cpc_tty_close(struct tty_struct *tty, struct file *flip);
123static int cpc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
124static int cpc_tty_write_room(struct tty_struct *tty);
125static int cpc_tty_chars_in_buffer(struct tty_struct *tty);
126static void cpc_tty_flush_buffer(struct tty_struct *tty);
127static void cpc_tty_hangup(struct tty_struct *tty);
128static void cpc_tty_rx_work(void *data);
129static void cpc_tty_tx_work(void *data);
130static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len);
131static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
132static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
133static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char);
134
135int pc300_tiocmset(struct tty_struct *, struct file *,
136 unsigned int, unsigned int);
137int pc300_tiocmget(struct tty_struct *, struct file *);
138
139/* functions called by PC300 driver */
140void cpc_tty_init(pc300dev_t *dev);
141void cpc_tty_unregister_service(pc300dev_t *pc300dev);
142void cpc_tty_receive(pc300dev_t *pc300dev);
143void cpc_tty_trigger_poll(pc300dev_t *pc300dev);
144void cpc_tty_reset_var(void);
145
146/*
147 * PC300 TTY clear "signal"
148 */
149static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char signal)
150{
151 pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
152 pc300_t *card = (pc300_t *) pc300chan->card;
153 int ch = pc300chan->channel;
154 unsigned long flags;
155
156 CPC_TTY_DBG("%s-tty: Clear signal %x\n",
157 pc300dev->dev->name, signal);
158 CPC_TTY_LOCK(card, flags);
159 cpc_writeb(card->hw.scabase + M_REG(CTL,ch),
 160		cpc_readb(card->hw.scabase+M_REG(CTL,ch)) | signal); /* set bit: signal off (CTL bits are active-low) */
161 CPC_TTY_UNLOCK(card,flags);
162}
163
164/*
165 * PC300 TTY set "signal" to ON
166 */
167static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char signal)
168{
169 pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
170 pc300_t *card = (pc300_t *) pc300chan->card;
171 int ch = pc300chan->channel;
172 unsigned long flags;
173
174 CPC_TTY_DBG("%s-tty: Set signal %x\n",
175 pc300dev->dev->name, signal);
176 CPC_TTY_LOCK(card, flags);
177 cpc_writeb(card->hw.scabase + M_REG(CTL,ch),
178 cpc_readb(card->hw.scabase+M_REG(CTL,ch))& ~signal);
179 CPC_TTY_UNLOCK(card,flags);
180}
181
182/*
183 * PC300 TTY initialization routine
184 *
185 * This routine is called by the PC300 driver during board configuration
186 * (ioctl=SIOCSP300CONF). At this point the adapter is completely
187 * initialized.
189 * o register TTY driver
190 * o init cpc_tty_area struct
191 */
192void cpc_tty_init(pc300dev_t *pc300dev)
193{
194 unsigned long port;
195 int aux;
196 st_cpc_tty_area * cpc_tty;
197
198 /* hdlcX - X=interface number */
199 port = pc300dev->dev->name[4] - '0';
200 if (port >= CPC_TTY_NPORTS) {
201 printk("%s-tty: invalid interface selected (0-%i): %li",
202 pc300dev->dev->name,
203 CPC_TTY_NPORTS-1,port);
204 return;
205 }
206
207 if (cpc_tty_cnt == 0) { /* first TTY connection -> register driver */
 208		CPC_TTY_DBG("%s-tty: driver init, major:%i, minor range:%i-%i\n",
209 pc300dev->dev->name,
210 CPC_TTY_MAJOR, CPC_TTY_MINOR_START,
211 CPC_TTY_MINOR_START+CPC_TTY_NPORTS);
212 /* initialize tty driver struct */
213 memset(&serial_drv,0,sizeof(struct tty_driver));
214 serial_drv.magic = TTY_DRIVER_MAGIC;
215 serial_drv.owner = THIS_MODULE;
216 serial_drv.driver_name = "pc300_tty";
217 serial_drv.name = "ttyCP";
218 serial_drv.major = CPC_TTY_MAJOR;
219 serial_drv.minor_start = CPC_TTY_MINOR_START;
220 serial_drv.num = CPC_TTY_NPORTS;
221 serial_drv.type = TTY_DRIVER_TYPE_SERIAL;
222 serial_drv.subtype = SERIAL_TYPE_NORMAL;
223
224 serial_drv.init_termios = tty_std_termios;
225 serial_drv.init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
226 serial_drv.flags = TTY_DRIVER_REAL_RAW;
227
228 /* interface routines from the upper tty layer to the tty driver */
229 serial_drv.open = cpc_tty_open;
230 serial_drv.close = cpc_tty_close;
231 serial_drv.write = cpc_tty_write;
232 serial_drv.write_room = cpc_tty_write_room;
233 serial_drv.chars_in_buffer = cpc_tty_chars_in_buffer;
234 serial_drv.tiocmset = pc300_tiocmset;
235 serial_drv.tiocmget = pc300_tiocmget;
236 serial_drv.flush_buffer = cpc_tty_flush_buffer;
237 serial_drv.hangup = cpc_tty_hangup;
238
239 /* register the TTY driver */
240 if (tty_register_driver(&serial_drv)) {
241 printk("%s-tty: Failed to register serial driver! ",
242 pc300dev->dev->name);
243 return;
244 }
245
246 memset((void *)cpc_tty_area, 0,
247 sizeof(st_cpc_tty_area) * CPC_TTY_NPORTS);
248 }
249
250 cpc_tty = &cpc_tty_area[port];
251
252 if (cpc_tty->state != CPC_TTY_ST_IDLE) {
253 CPC_TTY_DBG("%s-tty: TTY port %i, already in use.\n",
254 pc300dev->dev->name, port);
255 return;
256 }
257
258 cpc_tty_cnt++;
259 cpc_tty->state = CPC_TTY_ST_INIT;
260 cpc_tty->num_open= 0;
261 cpc_tty->tty_minor = port + CPC_TTY_MINOR_START;
262 cpc_tty->pc300dev = pc300dev;
263
264 INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty);
265 INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port);
266
267 cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL;
268
269 pc300dev->cpc_tty = (void *)cpc_tty;
270
271 aux = strlen(pc300dev->dev->name);
272 memcpy(cpc_tty->name, pc300dev->dev->name, aux);
273 memcpy(&cpc_tty->name[aux], "-tty", 5);
274
275 cpc_open(pc300dev->dev);
276 cpc_tty_signal_off(pc300dev, CTL_DTR);
277
278 CPC_TTY_DBG("%s: Initializing TTY Sync Driver, tty major#%d minor#%i\n",
279 cpc_tty->name,CPC_TTY_MAJOR,cpc_tty->tty_minor);
280 return;
281}
282
283/*
284 * PC300 TTY OPEN routine
285 *
286 * This routine is called by the tty driver to open the interface
287 * o verify minor
288 * o allocate buffer to Rx and Tx
289 */
290static int cpc_tty_open(struct tty_struct *tty, struct file *flip)
291{
292 int port ;
293 st_cpc_tty_area *cpc_tty;
294
295 if (!tty) {
296 return -ENODEV;
297 }
298
299 port = tty->index;
300
301 if ((port < 0) || (port >= CPC_TTY_NPORTS)){
302 CPC_TTY_DBG("pc300_tty: open invalid port %d\n", port);
303 return -ENODEV;
304 }
305
306 cpc_tty = &cpc_tty_area[port];
307
308 if (cpc_tty->state == CPC_TTY_ST_IDLE){
309 CPC_TTY_DBG("%s: open - invalid interface, port=%d\n",
310 cpc_tty->name, tty->index);
311 return -ENODEV;
312 }
313
314 if (cpc_tty->num_open == 0) { /* first open of this tty */
315 if (!cpc_tty_area[port].buf_tx){
316 cpc_tty_area[port].buf_tx = kmalloc(CPC_TTY_MAX_MTU,GFP_KERNEL);
317 if (cpc_tty_area[port].buf_tx == 0){
318 CPC_TTY_DBG("%s: error in memory allocation\n",cpc_tty->name);
319 return -ENOMEM;
320 }
321 }
322
323 if (cpc_tty_area[port].buf_rx.first) {
324 unsigned char * aux;
325 while (cpc_tty_area[port].buf_rx.first) {
326 aux = (unsigned char *)cpc_tty_area[port].buf_rx.first;
327 cpc_tty_area[port].buf_rx.first = cpc_tty_area[port].buf_rx.first->next;
328 kfree(aux);
329 }
330 cpc_tty_area[port].buf_rx.first = NULL;
331 cpc_tty_area[port].buf_rx.last = NULL;
332 }
333
334 cpc_tty_area[port].state = CPC_TTY_ST_OPEN;
335 cpc_tty_area[port].tty = tty;
336 tty->driver_data = &cpc_tty_area[port];
337
338 cpc_tty_signal_on(cpc_tty->pc300dev, CTL_DTR);
339 }
340
341 cpc_tty->num_open++;
342
343 CPC_TTY_DBG("%s: opening TTY driver\n", cpc_tty->name);
344
 345	/* notify the PC300 driver */
346 return 0;
347}
348
349/*
350 * PC300 TTY CLOSE routine
351 *
352 * This routine is called by the tty driver to close the interface
353 * o call close channel in PC300 driver (cpc_closech)
354 * o free Rx and Tx buffers
355 */
356
357static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
358{
359 st_cpc_tty_area *cpc_tty;
360 unsigned long flags;
361 int res;
362
363 if (!tty || !tty->driver_data ) {
 364		CPC_TTY_DBG("hdlcX-tty: no TTY in close\n");
365 return;
366 }
367
368 cpc_tty = (st_cpc_tty_area *) tty->driver_data;
369
370 if ((cpc_tty->tty != tty)|| (cpc_tty->state != CPC_TTY_ST_OPEN)) {
371 CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
372 return;
373 }
374
375 if (!cpc_tty->num_open) {
376 CPC_TTY_DBG("%s: TTY is closed\n",cpc_tty->name);
377 return;
378 }
379
380 if (--cpc_tty->num_open > 0) {
381 CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
382 return;
383 }
384
385 cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
386
387 CPC_TTY_LOCK(cpc_tty->pc300dev->chan->card, flags); /* lock irq */
388 cpc_tty->tty = NULL;
389 cpc_tty->state = CPC_TTY_ST_INIT;
390 CPC_TTY_UNLOCK(cpc_tty->pc300dev->chan->card, flags); /* unlock irq */
391
392 if (cpc_tty->buf_rx.first) {
393 unsigned char * aux;
394 while (cpc_tty->buf_rx.first) {
395 aux = (unsigned char *)cpc_tty->buf_rx.first;
396 cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
397 kfree(aux);
398 }
399 cpc_tty->buf_rx.first = NULL;
400 cpc_tty->buf_rx.last = NULL;
401 }
402
403 if (cpc_tty->buf_tx) {
404 kfree(cpc_tty->buf_tx);
405 cpc_tty->buf_tx = NULL;
406 }
407
408 CPC_TTY_DBG("%s: TTY closed\n",cpc_tty->name);
409
410 if (!serial_drv.refcount && cpc_tty_unreg_flag) {
411 cpc_tty_unreg_flag = 0;
412 CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
413 if ((res=tty_unregister_driver(&serial_drv))) {
414 CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
415 cpc_tty->name,res);
416 }
417 }
418 return;
419}
420
421/*
422 * PC300 TTY WRITE routine
423 *
424 * This routine is called by the tty driver to write a series of characters
425 * to the tty device. The characters may come from user or kernel space.
426 * o verify the DCD signal
427 * o send characters to board and start the transmission
428 */
429static int cpc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
430{
431 st_cpc_tty_area *cpc_tty;
432 pc300ch_t *pc300chan;
433 pc300_t *card;
434 int ch;
435 unsigned long flags;
436 struct net_device_stats *stats;
437
438 if (!tty || !tty->driver_data ) {
439 CPC_TTY_DBG("hdlcX-tty: no TTY in write\n");
440 return -ENODEV;
441 }
442
443 cpc_tty = (st_cpc_tty_area *) tty->driver_data;
444
445 if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
446 CPC_TTY_DBG("%s: TTY is not opened\n", cpc_tty->name);
447 return -ENODEV;
448 }
449
450 if (count > CPC_TTY_MAX_MTU) {
451 CPC_TTY_DBG("%s: count is invalid\n",cpc_tty->name);
452 return -EINVAL; /* frame too big */
453 }
454
455 CPC_TTY_DBG("%s: cpc_tty_write data len=%i\n",cpc_tty->name,count);
456
457 pc300chan = (pc300ch_t *)((pc300dev_t*)cpc_tty->pc300dev)->chan;
458 stats = hdlc_stats(((pc300dev_t*)cpc_tty->pc300dev)->dev);
459 card = (pc300_t *) pc300chan->card;
460 ch = pc300chan->channel;
461
462 /* verify DCD signal*/
463 if (cpc_readb(card->hw.scabase + M_REG(ST3,ch)) & ST3_DCD) {
464 /* DCD is OFF */
465 CPC_TTY_DBG("%s : DCD is OFF\n", cpc_tty->name);
466 stats->tx_errors++;
467 stats->tx_carrier_errors++;
468 CPC_TTY_LOCK(card, flags);
469 cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_BUF_CLR);
470
471 if (card->hw.type == PC300_TE) {
472 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
473 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
474 ~(CPLD_REG2_FALC_LED1 << (2 *ch)));
475 }
476
477 CPC_TTY_UNLOCK(card, flags);
478
479 return -EINVAL;
480 }
481
482 if (cpc_tty_send_to_card(cpc_tty->pc300dev, (void*)buf, count)) {
483 /* failed to send */
 484		CPC_TTY_DBG("%s: transmission error\n", cpc_tty->name);
485 return 0;
486 }
487 return count;
488}
489
490/*
491 * PC300 TTY Write Room routine
492 *
 493 * This routine returns the number of characters the tty driver will accept
494 * for queuing to be written.
495 * o return MTU
496 */
497static int cpc_tty_write_room(struct tty_struct *tty)
498{
499 st_cpc_tty_area *cpc_tty;
500
501 if (!tty || !tty->driver_data ) {
502 CPC_TTY_DBG("hdlcX-tty: no TTY to write room\n");
503 return -ENODEV;
504 }
505
506 cpc_tty = (st_cpc_tty_area *) tty->driver_data;
507
508 if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
509 CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
510 return -ENODEV;
511 }
512
513 CPC_TTY_DBG("%s: write room\n",cpc_tty->name);
514
515 return CPC_TTY_MAX_MTU;
516}
517
518/*
519 * PC300 TTY chars in buffer routine
520 *
 521 * This routine returns the number of characters in the transmission buffer
522 * o returns 0
523 */
524static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
525{
526 st_cpc_tty_area *cpc_tty;
527
528 if (!tty || !tty->driver_data ) {
529 CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");
530 return -ENODEV;
531 }
532
533 cpc_tty = (st_cpc_tty_area *) tty->driver_data;
534
535 if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
536 CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
537 return -ENODEV;
538 }
539
540 return(0);
541}
542
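/*
 * tiocmset: map TIOCM_RTS/TIOCM_DTR set/clear requests onto the SCA CTL
 * register through the signal on/off helpers above.
 */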
543int pc300_tiocmset(struct tty_struct *tty, struct file *file,
544 unsigned int set, unsigned int clear)
545{
546 st_cpc_tty_area *cpc_tty;
547
548 CPC_TTY_DBG("%s: set:%x clear:%x\n", __FUNCTION__, set, clear);
549
550 if (!tty || !tty->driver_data ) {
551 CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");
552 return -ENODEV;
553 }
554
555 cpc_tty = (st_cpc_tty_area *) tty->driver_data;
556
557 if (set & TIOCM_RTS)
558 cpc_tty_signal_on(cpc_tty->pc300dev, CTL_RTS);
559 if (set & TIOCM_DTR)
560 cpc_tty_signal_on(cpc_tty->pc300dev, CTL_DTR);
561
562 if (clear & TIOCM_RTS)
563 cpc_tty_signal_off(cpc_tty->pc300dev, CTL_RTS);
564 if (clear & TIOCM_DTR)
565 cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
566
567 return 0;
568}
569
570int pc300_tiocmget(struct tty_struct *tty, struct file *file)
571{
572 unsigned int result;
573 unsigned char status;
574 unsigned long flags;
575 st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) tty->driver_data;
576 pc300dev_t *pc300dev = cpc_tty->pc300dev;
577 pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
578 pc300_t *card = (pc300_t *) pc300chan->card;
579 int ch = pc300chan->channel;
580
581 cpc_tty = (st_cpc_tty_area *) tty->driver_data;
582
583 CPC_TTY_DBG("%s-tty: tiocmget\n",
584 ((struct net_device*)(pc300dev->hdlc))->name);
585
586 CPC_TTY_LOCK(card, flags);
587 status = cpc_readb(card->hw.scabase+M_REG(CTL,ch));
588 CPC_TTY_UNLOCK(card,flags);
589
590 result = ((status & CTL_DTR) ? TIOCM_DTR : 0) |
591 ((status & CTL_RTS) ? TIOCM_RTS : 0);
592
593 return result;
594}
595
596/*
597 * PC300 TTY Flush Buffer routine
598 *
599 * This routine resets the transmission buffer
600 */
601static void cpc_tty_flush_buffer(struct tty_struct *tty)
602{
603 st_cpc_tty_area *cpc_tty;
604
605 if (!tty || !tty->driver_data ) {
606 CPC_TTY_DBG("hdlcX-tty: no TTY to flush buffer\n");
607 return;
608 }
609
610 cpc_tty = (st_cpc_tty_area *) tty->driver_data;
611
612 if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
613 CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
614 return;
615 }
616
617 CPC_TTY_DBG("%s: call wake_up_interruptible\n",cpc_tty->name);
618
619 tty_wakeup(tty);
620 return;
621}
622
623/*
624 * PC300 TTY Hangup routine
625 *
626 * This routine is called by the tty driver to hangup the interface
627 * o clear DTR signal
628 */
629
630static void cpc_tty_hangup(struct tty_struct *tty)
631{
632 st_cpc_tty_area *cpc_tty;
633 int res;
634
635 if (!tty || !tty->driver_data ) {
636 CPC_TTY_DBG("hdlcX-tty: no TTY to hangup\n");
637 return ;
638 }
639
640 cpc_tty = (st_cpc_tty_area *) tty->driver_data;
641
642 if ((cpc_tty->tty != tty) || (cpc_tty->state != CPC_TTY_ST_OPEN)) {
643 CPC_TTY_DBG("%s: TTY is not opened\n",cpc_tty->name);
644 return ;
645 }
646 if (!serial_drv.refcount && cpc_tty_unreg_flag) {
647 cpc_tty_unreg_flag = 0;
648 CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
649 if ((res=tty_unregister_driver(&serial_drv))) {
650 CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
651 cpc_tty->name,res);
652 }
653 }
654 cpc_tty_signal_off(cpc_tty->pc300dev, CTL_DTR);
655}
656
657/*
658 * PC300 TTY RX work routine
 659 * This routine handles the deferred RX work:
660 * o verify read buffer
661 * o call the line disc. read
662 * o free memory
663 */
664static void cpc_tty_rx_work(void * data)
665{
666 unsigned long port;
667 int i, j;
668 st_cpc_tty_area *cpc_tty;
669 volatile st_cpc_rx_buf * buf;
670 char flags=0,flg_rx=1;
671 struct tty_ldisc *ld;
672
673 if (cpc_tty_cnt == 0) return;
674
675
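	/* Make up to four passes over all ports, starting with the port that
	 * scheduled this work, and stop early once a pass finds no queued
	 * RX buffers. */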
676 for (i=0; (i < 4) && flg_rx ; i++) {
677 flg_rx = 0;
678 port = (unsigned long)data;
679 for (j=0; j < CPC_TTY_NPORTS; j++) {
680 cpc_tty = &cpc_tty_area[port];
681
682 if ((buf=cpc_tty->buf_rx.first) != 0) {
683 if(cpc_tty->tty) {
684 ld = tty_ldisc_ref(cpc_tty->tty);
685 if(ld) {
686 if (ld->receive_buf) {
687 CPC_TTY_DBG("%s: call line disc. receive_buf\n",cpc_tty->name);
688 ld->receive_buf(cpc_tty->tty, (char *)(buf->data), &flags, buf->size);
689 }
690 tty_ldisc_deref(ld);
691 }
692 }
693 cpc_tty->buf_rx.first = cpc_tty->buf_rx.first->next;
694 kfree((unsigned char *)buf);
695 buf = cpc_tty->buf_rx.first;
696 flg_rx = 1;
697 }
698 if (++port == CPC_TTY_NPORTS) port = 0;
699 }
700 }
701}
702
703/*
 704 * PC300 TTY RX discard-frame routine
 705 *
 706 * This routine discards the RX descriptors of the current (bad) frame:
 707 * o clear the status and length of each descriptor
 708 * o advance rx_first_bd until the end-of-message descriptor
 709 *   is reached (or the ring is empty)
710 */
711static void cpc_tty_rx_disc_frame(pc300ch_t *pc300chan)
712{
713 volatile pcsca_bd_t __iomem * ptdescr;
714 volatile unsigned char status;
715 pc300_t *card = (pc300_t *)pc300chan->card;
716 int ch = pc300chan->channel;
717
718 /* dma buf read */
719 ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase +
720 RX_BD_ADDR(ch, pc300chan->rx_first_bd));
721 while (pc300chan->rx_first_bd != pc300chan->rx_last_bd) {
722 status = cpc_readb(&ptdescr->status);
723 cpc_writeb(&ptdescr->status, 0);
724 cpc_writeb(&ptdescr->len, 0);
725 pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) &
726 (N_DMA_RX_BUF - 1);
727 if (status & DST_EOM) {
728 break; /* end of message */
729 }
730 ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase + cpc_readl(&ptdescr->next));
731 }
732}
733
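/*
 * PC300 TTY receive routine
 *
 * This routine is called from the RX interrupt path:
 * o read every complete frame available in the on-board RX descriptors
 * o drop oversized or errored frames and update the statistics
 * o copy good frames into an rx buffer and queue the RX work routine
 */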
734void cpc_tty_receive(pc300dev_t *pc300dev)
735{
736 st_cpc_tty_area *cpc_tty;
737 pc300ch_t *pc300chan = (pc300ch_t *)pc300dev->chan;
738 pc300_t *card = (pc300_t *)pc300chan->card;
739 int ch = pc300chan->channel;
740 volatile pcsca_bd_t __iomem * ptdescr;
741 struct net_device_stats *stats = hdlc_stats(pc300dev->dev);
742 int rx_len, rx_aux;
743 volatile unsigned char status;
744 unsigned short first_bd = pc300chan->rx_first_bd;
745 st_cpc_rx_buf *new=NULL;
746 unsigned char dsr_rx;
747
748 if (pc300dev->cpc_tty == NULL) {
749 return;
750 }
751
752 dsr_rx = cpc_readb(card->hw.scabase + DSR_RX(ch));
753
754 cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty;
755
756 while (1) {
757 rx_len = 0;
758 ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase + RX_BD_ADDR(ch, first_bd));
759 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
760 rx_len += cpc_readw(&ptdescr->len);
761 first_bd = (first_bd + 1) & (N_DMA_RX_BUF - 1);
762 if (status & DST_EOM) {
763 break;
764 }
765 ptdescr=(pcsca_bd_t __iomem *)(card->hw.rambase+cpc_readl(&ptdescr->next));
766 }
767
768 if (!rx_len) {
769 if (dsr_rx & DSR_BOF) {
770 /* update EDA */
771 cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
772 RX_BD_ADDR(ch, pc300chan->rx_last_bd));
773 }
774 if (new) {
775 kfree(new);
776 new = NULL;
777 }
778 return;
779 }
780
781 if (rx_len > CPC_TTY_MAX_MTU) {
782 /* Free RX descriptors */
783 CPC_TTY_DBG("%s: frame size is invalid.\n",cpc_tty->name);
784 stats->rx_errors++;
785 stats->rx_frame_errors++;
786 cpc_tty_rx_disc_frame(pc300chan);
787 continue;
788 }
789
790 new = (st_cpc_rx_buf *) kmalloc(rx_len + sizeof(st_cpc_rx_buf), GFP_ATOMIC);
791 if (new == 0) {
792 cpc_tty_rx_disc_frame(pc300chan);
793 continue;
794 }
795
796 /* dma buf read */
797 ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase +
798 RX_BD_ADDR(ch, pc300chan->rx_first_bd));
799
800 rx_len = 0; /* counter frame size */
801
802 while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) {
803 rx_aux = cpc_readw(&ptdescr->len);
804 if ((status & (DST_OVR | DST_CRC | DST_RBIT | DST_SHRT | DST_ABT))
805 || (rx_aux > BD_DEF_LEN)) {
806 CPC_TTY_DBG("%s: reception error\n", cpc_tty->name);
807 stats->rx_errors++;
808 if (status & DST_OVR) {
809 stats->rx_fifo_errors++;
810 }
811 if (status & DST_CRC) {
812 stats->rx_crc_errors++;
813 }
814 if ((status & (DST_RBIT | DST_SHRT | DST_ABT)) ||
815 (rx_aux > BD_DEF_LEN)) {
816 stats->rx_frame_errors++;
817 }
 818				/* discard remaining descriptors used by the bad frame */
819 CPC_TTY_DBG("%s: reception error - discard descriptors",
820 cpc_tty->name);
821 cpc_tty_rx_disc_frame(pc300chan);
822 rx_len = 0;
823 kfree(new);
824 new = NULL;
825 break; /* read next frame - while(1) */
826 }
827
828 if (cpc_tty->state != CPC_TTY_ST_OPEN) {
829 /* Free RX descriptors */
830 cpc_tty_rx_disc_frame(pc300chan);
831 stats->rx_dropped++;
832 rx_len = 0;
833 kfree(new);
834 new = NULL;
835 break; /* read next frame - while(1) */
836 }
837
838 /* read the segment of the frame */
839 if (rx_aux != 0) {
840 memcpy_fromio((new->data + rx_len),
841 (void __iomem *)(card->hw.rambase +
842 cpc_readl(&ptdescr->ptbuf)), rx_aux);
843 rx_len += rx_aux;
844 }
845 cpc_writeb(&ptdescr->status,0);
846 cpc_writeb(&ptdescr->len, 0);
847 pc300chan->rx_first_bd = (pc300chan->rx_first_bd + 1) &
848 (N_DMA_RX_BUF -1);
849 if (status & DST_EOM)break;
850
851 ptdescr = (pcsca_bd_t __iomem *) (card->hw.rambase +
852 cpc_readl(&ptdescr->next));
853 }
854 /* update pointer */
855 pc300chan->rx_last_bd = (pc300chan->rx_first_bd - 1) &
856 (N_DMA_RX_BUF - 1) ;
857 if (!(dsr_rx & DSR_BOF)) {
858 /* update EDA */
859 cpc_writel(card->hw.scabase + DRX_REG(EDAL, ch),
860 RX_BD_ADDR(ch, pc300chan->rx_last_bd));
861 }
862 if (rx_len != 0) {
863 stats->rx_bytes += rx_len;
864
865 if (pc300dev->trace_on) {
866 cpc_tty_trace(pc300dev, new->data,rx_len, 'R');
867 }
868 new->size = rx_len;
869 new->next = NULL;
870 if (cpc_tty->buf_rx.first == 0) {
871 cpc_tty->buf_rx.first = new;
872 cpc_tty->buf_rx.last = new;
873 } else {
874 cpc_tty->buf_rx.last->next = new;
875 cpc_tty->buf_rx.last = new;
876 }
877 schedule_work(&(cpc_tty->tty_rx_work));
878 stats->rx_packets++;
879 }
880 }
881}
882
883/*
884 * PC300 TTY TX work routine
885 *
 886 * This routine handles the deferred TX work:
 887 * o check that the tty is still open
 888 * o call tty_wakeup() so pending writers can continue
889 */
890static void cpc_tty_tx_work(void *data)
891{
892 st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data;
893 struct tty_struct *tty;
894
895 CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name);
896
897 if ((tty = cpc_tty->tty) == 0) {
898 CPC_TTY_DBG("%s: the interface is not opened\n",cpc_tty->name);
899 return;
900 }
901 tty_wakeup(tty);
902}
903
904/*
905 * PC300 TTY send to card routine
906 *
 907 * This routine sends data to the card.
908 * o clear descriptors
909 * o write data to DMA buffers
910 * o start the transmission
911 */
912static int cpc_tty_send_to_card(pc300dev_t *dev,void* buf, int len)
913{
914 pc300ch_t *chan = (pc300ch_t *)dev->chan;
915 pc300_t *card = (pc300_t *)chan->card;
916 int ch = chan->channel;
917 struct net_device_stats *stats = hdlc_stats(dev->dev);
918 unsigned long flags;
919 volatile pcsca_bd_t __iomem *ptdescr;
920 int i, nchar;
921 int tosend = len;
922 int nbuf = ((len - 1)/BD_DEF_LEN) + 1;
923 unsigned char *pdata=buf;
924
 925	CPC_TTY_DBG("%s: cpc_tty_send_to_card len=%i",
926 (st_cpc_tty_area *)dev->cpc_tty->name,len);
927
928 if (nbuf >= card->chan[ch].nfree_tx_bd) {
929 return 1;
930 }
931
932 /* write buffer to DMA buffers */
933 CPC_TTY_DBG("%s: call dma_buf_write\n",
934 (st_cpc_tty_area *)dev->cpc_tty->name);
935 for (i = 0 ; i < nbuf ; i++) {
936 ptdescr = (pcsca_bd_t __iomem *)(card->hw.rambase +
937 TX_BD_ADDR(ch, card->chan[ch].tx_next_bd));
938 nchar = (BD_DEF_LEN > tosend) ? tosend : BD_DEF_LEN;
939 if (cpc_readb(&ptdescr->status) & DST_OSB) {
940 memcpy_toio((void __iomem *)(card->hw.rambase +
941 cpc_readl(&ptdescr->ptbuf)),
942 &pdata[len - tosend],
943 nchar);
944 card->chan[ch].nfree_tx_bd--;
945 if ((i + 1) == nbuf) {
946 /* This must be the last BD to be used */
947 cpc_writeb(&ptdescr->status, DST_EOM);
948 } else {
949 cpc_writeb(&ptdescr->status, 0);
950 }
951 cpc_writew(&ptdescr->len, nchar);
952 } else {
953 CPC_TTY_DBG("%s: error in dma_buf_write\n",
954 (st_cpc_tty_area *)dev->cpc_tty->name);
955 stats->tx_dropped++;
956 return 1;
957 }
958 tosend -= nchar;
959 card->chan[ch].tx_next_bd =
960 (card->chan[ch].tx_next_bd + 1) & (N_DMA_TX_BUF - 1);
961 }
962
963 if (dev->trace_on) {
964 cpc_tty_trace(dev, buf, len,'T');
965 }
966
967 /* start transmission */
968 CPC_TTY_DBG("%s: start transmission\n",
969 (st_cpc_tty_area *)dev->cpc_tty->name);
970
971 CPC_TTY_LOCK(card, flags);
972 cpc_writeb(card->hw.scabase + DTX_REG(EDAL, ch),
973 TX_BD_ADDR(ch, chan->tx_next_bd));
974 cpc_writeb(card->hw.scabase + M_REG(CMD, ch), CMD_TX_ENA);
975 cpc_writeb(card->hw.scabase + DSR_TX(ch), DSR_DE);
976
977 if (card->hw.type == PC300_TE) {
978 cpc_writeb(card->hw.falcbase + card->hw.cpld_reg2,
979 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) |
980 (CPLD_REG2_FALC_LED1 << (2 * ch)));
981 }
982 CPC_TTY_UNLOCK(card, flags);
983 return 0;
984}
985
986/*
987 * PC300 TTY trace routine
988 *
 989 * This routine sends a trace of the connection to the application:
 990 * o build an skb carrying the interface name, the direction (R/T)
 991 *   and the frame data
 992 * o hand the skb to the network stack via netif_rx()
993 */
994
995static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx)
996{
997 struct sk_buff *skb;
998
999 if ((skb = dev_alloc_skb(10 + len)) == NULL) {
1000 /* out of memory */
1001 CPC_TTY_DBG("%s: tty_trace - out of memory\n", dev->dev->name);
1002 return;
1003 }
1004
1005 skb_put (skb, 10 + len);
1006 skb->dev = dev->dev;
1007 skb->protocol = htons(ETH_P_CUST);
1008 skb->mac.raw = skb->data;
1009 skb->pkt_type = PACKET_HOST;
1010 skb->len = 10 + len;
1011
1012 memcpy(skb->data,dev->dev->name,5);
1013 skb->data[5] = '[';
1014 skb->data[6] = rxtx;
1015 skb->data[7] = ']';
1016 skb->data[8] = ':';
1017 skb->data[9] = ' ';
1018 memcpy(&skb->data[10], buf, len);
1019 netif_rx(skb);
1020}
1021
1022/*
1023 * PC300 TTY unregister service routine
1024 *
 1025 * This routine unregisters one interface.
1026 */
1027void cpc_tty_unregister_service(pc300dev_t *pc300dev)
1028{
1029 st_cpc_tty_area *cpc_tty;
1030 ulong flags;
1031 int res;
1032
1033 if ((cpc_tty= (st_cpc_tty_area *) pc300dev->cpc_tty) == 0) {
1034 CPC_TTY_DBG("%s: interface is not TTY\n", pc300dev->dev->name);
1035 return;
1036 }
1037 CPC_TTY_DBG("%s: cpc_tty_unregister_service", cpc_tty->name);
1038
1039 if (cpc_tty->pc300dev != pc300dev) {
1040 CPC_TTY_DBG("%s: invalid tty ptr=%s\n",
1041 pc300dev->dev->name, cpc_tty->name);
1042 return;
1043 }
1044
1045 if (--cpc_tty_cnt == 0) {
1046 if (serial_drv.refcount) {
1047 CPC_TTY_DBG("%s: unregister is not possible, refcount=%d",
1048 cpc_tty->name, serial_drv.refcount);
1049 cpc_tty_cnt++;
1050 cpc_tty_unreg_flag = 1;
1051 return;
1052 } else {
1053 CPC_TTY_DBG("%s: unregister the tty driver\n", cpc_tty->name);
1054 if ((res=tty_unregister_driver(&serial_drv))) {
1055 CPC_TTY_DBG("%s: ERROR ->unregister the tty driver error=%d\n",
1056 cpc_tty->name,res);
1057 }
1058 }
1059 }
1060 CPC_TTY_LOCK(pc300dev->chan->card,flags);
1061 cpc_tty->tty = NULL;
1062 CPC_TTY_UNLOCK(pc300dev->chan->card, flags);
1063 cpc_tty->tty_minor = 0;
1064 cpc_tty->state = CPC_TTY_ST_IDLE;
1065}
1066
1067/*
1068 * PC300 TTY trigger poll routine
 1069 * This routine is called by the PC300 driver, from the TX interrupt path, to schedule the TTY TX work.
1070 */
1071void cpc_tty_trigger_poll(pc300dev_t *pc300dev)
1072{
1073 st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *)pc300dev->cpc_tty;
1074 if (!cpc_tty) {
1075 return;
1076 }
1077 schedule_work(&(cpc_tty->tty_tx_work));
1078}
1079
1080/*
1081 * PC300 TTY reset var routine
 1082 * This routine is called by the PC300 driver to initialize the TTY area.
1083 */
1084
1085void cpc_tty_reset_var(void)
1086{
1087 int i ;
1088
1089 CPC_TTY_DBG("hdlcX-tty: reset variables\n");
1090 /* reset the tty_driver structure - serial_drv */
1091 memset(&serial_drv, 0, sizeof(struct tty_driver));
1092 for (i=0; i < CPC_TTY_NPORTS; i++){
1093 memset(&cpc_tty_area[i],0, sizeof(st_cpc_tty_area));
1094 }
1095}
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
new file mode 100644
index 000000000000..8dea07b47999
--- /dev/null
+++ b/drivers/net/wan/pci200syn.c
@@ -0,0 +1,488 @@
1/*
2 * Goramo PCI200SYN synchronous serial card driver for Linux
3 *
4 * Copyright (C) 2002-2003 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * For information see http://hq.pm.waw.pl/hdlc/
11 *
12 * Sources of information:
13 * Hitachi HD64572 SCA-II User's Manual
14 * PLX Technology Inc. PCI9052 Data Book
15 */
16
17#include <linux/module.h>
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21#include <linux/types.h>
22#include <linux/fcntl.h>
23#include <linux/in.h>
24#include <linux/string.h>
25#include <linux/errno.h>
26#include <linux/init.h>
27#include <linux/ioport.h>
28#include <linux/moduleparam.h>
29#include <linux/netdevice.h>
30#include <linux/hdlc.h>
31#include <linux/pci.h>
32#include <asm/delay.h>
33#include <asm/io.h>
34
35#include "hd64572.h"
36
37static const char* version = "Goramo PCI200SYN driver version: 1.16";
38static const char* devname = "PCI200SYN";
39
40#undef DEBUG_PKT
41#define DEBUG_RINGS
42
43#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */
44#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */
45#define ALL_PAGES_ALWAYS_MAPPED
46#define NEED_DETECT_RAM
47#define NEED_SCA_MSCI_INTR
48#define MAX_TX_BUFFERS 10
49
50static int pci_clock_freq = 33000000;
51#define CLOCK_BASE pci_clock_freq
52
53#define PCI_VENDOR_ID_GORAMO 0x10B5 /* uses PLX:9050 ID - this card */
54#define PCI_DEVICE_ID_PCI200SYN 0x9050 /* doesn't have its own ID */
55
56
57/*
58 * PLX PCI9052 local configuration and shared runtime registers.
59 * This structure can be used to access 9052 registers (memory mapped).
60 */
61typedef struct {
62 u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
63 u32 loc_rom_range; /* 10h : Local ROM Range */
64 u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
65 u32 loc_rom_base; /* 24h : Local ROM Base */
66 u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
67 u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
68 u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
69 u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
70 u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
71}plx9052;
72
73
74
75typedef struct port_s {
76 struct net_device *dev;
77 struct card_s *card;
78 spinlock_t lock; /* TX lock */
79 sync_serial_settings settings;
80 int rxpart; /* partial frame received, next frame invalid*/
81 unsigned short encoding;
82 unsigned short parity;
83 u16 rxin; /* rx ring buffer 'in' pointer */
84 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
85 u16 txlast;
86 u8 rxs, txs, tmc; /* SCA registers */
87 u8 phy_node; /* physical port # - 0 or 1 */
88}port_t;
89
90
91
92typedef struct card_s {
93 u8 __iomem *rambase; /* buffer memory base (virtual) */
94 u8 __iomem *scabase; /* SCA memory base (virtual) */
95 plx9052 __iomem *plxbase;/* PLX registers memory base (virtual) */
96 u16 rx_ring_buffers; /* number of buffers in a ring */
97 u16 tx_ring_buffers;
98 u16 buff_offset; /* offset of first buffer of first channel */
99 u8 irq; /* interrupt request level */
100
101 port_t ports[2];
102}card_t;
103
104
105#define sca_in(reg, card) readb(card->scabase + (reg))
106#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
107#define sca_inw(reg, card) readw(card->scabase + (reg))
108#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
109#define sca_inl(reg, card) readl(card->scabase + (reg))
110#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
111
112#define port_to_card(port) (port->card)
113#define log_node(port) (port->phy_node)
114#define phy_node(port) (port->phy_node)
115#define winbase(card) (card->rambase)
116#define get_port(card, port) (&card->ports[port])
117#define sca_flush(card) (sca_in(IER0, card));
118
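/* Chunked replacement for memcpy_toio: copy at most 256 bytes at a time and
 * read the destination back after each chunk to flush posted writes before
 * the next burst. hd6457x.c, included below, picks this up through the
 * memcpy_toio redefinition that follows. */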
119static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
120{
121 int len;
122 do {
123 len = length > 256 ? 256 : length;
124 memcpy_toio(dest, src, len);
125 dest += len;
126 src += len;
127 length -= len;
128 readb(dest);
129 } while (len);
130}
131
132#undef memcpy_toio
133#define memcpy_toio new_memcpy_toio
134
135#include "hd6457x.c"
136
137
138static void pci200_set_iface(port_t *port)
139{
140 card_t *card = port->card;
141 u16 msci = get_msci(port);
142 u8 rxs = port->rxs & CLK_BRG_MASK;
143 u8 txs = port->txs & CLK_BRG_MASK;
144
145 sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
146 port_to_card(port));
147 switch(port->settings.clock_type) {
148 case CLOCK_INT:
149 rxs |= CLK_BRG; /* BRG output */
150 txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
151 break;
152
153 case CLOCK_TXINT:
154 rxs |= CLK_LINE; /* RXC input */
155 txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
156 break;
157
158 case CLOCK_TXFROMRX:
159 rxs |= CLK_LINE; /* RXC input */
160 txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
161 break;
162
163 default: /* EXTernal clock */
164 rxs |= CLK_LINE; /* RXC input */
165 txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
166 break;
167 }
168
169 port->rxs = rxs;
170 port->txs = txs;
171 sca_out(rxs, msci + RXS, card);
172 sca_out(txs, msci + TXS, card);
173 sca_set_port(port);
174}
175
176
177
178static int pci200_open(struct net_device *dev)
179{
180 port_t *port = dev_to_port(dev);
181
182 int result = hdlc_open(dev);
183 if (result)
184 return result;
185
186 sca_open(dev);
187 pci200_set_iface(port);
188 sca_flush(port_to_card(port));
189 return 0;
190}
191
192
193
194static int pci200_close(struct net_device *dev)
195{
196 sca_close(dev);
197 sca_flush(port_to_card(dev_to_port(dev)));
198 hdlc_close(dev);
199 return 0;
200}
201
202
203
204static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
205{
206 const size_t size = sizeof(sync_serial_settings);
207 sync_serial_settings new_line;
208 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
209 port_t *port = dev_to_port(dev);
210
211#ifdef DEBUG_RINGS
212 if (cmd == SIOCDEVPRIVATE) {
213 sca_dump_rings(dev);
214 return 0;
215 }
216#endif
217 if (cmd != SIOCWANDEV)
218 return hdlc_ioctl(dev, ifr, cmd);
219
220 switch(ifr->ifr_settings.type) {
221 case IF_GET_IFACE:
222 ifr->ifr_settings.type = IF_IFACE_V35;
223 if (ifr->ifr_settings.size < size) {
224 ifr->ifr_settings.size = size; /* data size wanted */
225 return -ENOBUFS;
226 }
227 if (copy_to_user(line, &port->settings, size))
228 return -EFAULT;
229 return 0;
230
231 case IF_IFACE_V35:
232 case IF_IFACE_SYNC_SERIAL:
233 if (!capable(CAP_NET_ADMIN))
234 return -EPERM;
235
236 if (copy_from_user(&new_line, line, size))
237 return -EFAULT;
238
239 if (new_line.clock_type != CLOCK_EXT &&
240 new_line.clock_type != CLOCK_TXFROMRX &&
241 new_line.clock_type != CLOCK_INT &&
242 new_line.clock_type != CLOCK_TXINT)
243 return -EINVAL; /* No such clock setting */
244
245 if (new_line.loopback != 0 && new_line.loopback != 1)
246 return -EINVAL;
247
248 memcpy(&port->settings, &new_line, size); /* Update settings */
249 pci200_set_iface(port);
250 sca_flush(port_to_card(port));
251 return 0;
252
253 default:
254 return hdlc_ioctl(dev, ifr, cmd);
255 }
256}
257
258
259
260static void pci200_pci_remove_one(struct pci_dev *pdev)
261{
262 int i;
263 card_t *card = pci_get_drvdata(pdev);
264
265 for(i = 0; i < 2; i++)
266 if (card->ports[i].card) {
267 struct net_device *dev = port_to_dev(&card->ports[i]);
268 unregister_hdlc_device(dev);
269 }
270
271 if (card->irq)
272 free_irq(card->irq, card);
273
274 if (card->rambase)
275 iounmap(card->rambase);
276 if (card->scabase)
277 iounmap(card->scabase);
278 if (card->plxbase)
279 iounmap(card->plxbase);
280
281 pci_release_regions(pdev);
282 pci_disable_device(pdev);
283 pci_set_drvdata(pdev, NULL);
284 if (card->ports[0].dev)
285 free_netdev(card->ports[0].dev);
286 if (card->ports[1].dev)
287 free_netdev(card->ports[1].dev);
288 kfree(card);
289}
290
291
292
293static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
294 const struct pci_device_id *ent)
295{
296 card_t *card;
297 u8 rev_id;
298 u32 __iomem *p;
299 int i;
300 u32 ramsize;
301 u32 ramphys; /* buffer memory base */
302 u32 scaphys; /* SCA memory base */
303 u32 plxphys; /* PLX registers memory base */
304
305#ifndef MODULE
306 static int printed_version;
307 if (!printed_version++)
308 printk(KERN_INFO "%s\n", version);
309#endif
310
311 i = pci_enable_device(pdev);
312 if (i)
313 return i;
314
315 i = pci_request_regions(pdev, "PCI200SYN");
316 if (i) {
317 pci_disable_device(pdev);
318 return i;
319 }
320
321 card = kmalloc(sizeof(card_t), GFP_KERNEL);
322 if (card == NULL) {
323 printk(KERN_ERR "pci200syn: unable to allocate memory\n");
324 pci_release_regions(pdev);
325 pci_disable_device(pdev);
326 return -ENOBUFS;
327 }
328 memset(card, 0, sizeof(card_t));
329 pci_set_drvdata(pdev, card);
330 card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
331 card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
332 if (!card->ports[0].dev || !card->ports[1].dev) {
333 printk(KERN_ERR "pci200syn: unable to allocate memory\n");
334 pci200_pci_remove_one(pdev);
335 return -ENOMEM;
336 }
337
338 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
339 if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
340 pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
341 pci_resource_len(pdev, 3) < 16384) {
342 printk(KERN_ERR "pci200syn: invalid card EEPROM parameters\n");
343 pci200_pci_remove_one(pdev);
344 return -EFAULT;
345 }
346
347 plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
348 card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);
349
350 scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
351 card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
352
353 ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
354 card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));
355
356 if (card->plxbase == NULL ||
357 card->scabase == NULL ||
358 card->rambase == NULL) {
359 printk(KERN_ERR "pci200syn: ioremap() failed\n");
360		pci200_pci_remove_one(pdev);
		return -EFAULT;
361	}
362
363 /* Reset PLX */
364 p = &card->plxbase->init_ctrl;
365 writel(readl(p) | 0x40000000, p);
366 readl(p); /* Flush the write - do not use sca_flush */
367 udelay(1);
368
369 writel(readl(p) & ~0x40000000, p);
370 readl(p); /* Flush the write - do not use sca_flush */
371 udelay(1);
372
373 ramsize = sca_detect_ram(card, card->rambase,
374 pci_resource_len(pdev, 3));
375
376 /* number of TX + RX buffers for one port - this is dual port card */
377 i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU));
378 card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
379 card->rx_ring_buffers = i - card->tx_ring_buffers;
380
381 card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers +
382 card->rx_ring_buffers);
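	/*
	 * Worked example with made-up numbers (the real values come from
	 * sca_detect_ram() and the hd6457x descriptor layout): if ramsize
	 * were 131072 bytes and sizeof(pkt_desc) + HDLC_MAX_MRU came to
	 * 1612 bytes, then i = 131072 / (2 * 1612) = 40 buffers per port,
	 * split as min(40 / 2, MAX_TX_BUFFERS) TX buffers with the
	 * remainder used for RX.
	 */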
383
384 printk(KERN_INFO "pci200syn: %u KB RAM at 0x%x, IRQ%u, using %u TX +"
385 " %u RX packets rings\n", ramsize / 1024, ramphys,
386 pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
387
388 if (card->tx_ring_buffers < 1) {
389 printk(KERN_ERR "pci200syn: RAM test failed\n");
390 pci200_pci_remove_one(pdev);
391 return -EFAULT;
392 }
393
394 /* Enable interrupts on the PCI bridge */
395 p = &card->plxbase->intr_ctrl_stat;
396 writew(readw(p) | 0x0040, p);
397
398 /* Allocate IRQ */
399 if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
400 printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
401 pdev->irq);
402 pci200_pci_remove_one(pdev);
403 return -EBUSY;
404 }
405 card->irq = pdev->irq;
406
407 sca_init(card, 0);
408
409 for(i = 0; i < 2; i++) {
410 port_t *port = &card->ports[i];
411 struct net_device *dev = port_to_dev(port);
412 hdlc_device *hdlc = dev_to_hdlc(dev);
413 port->phy_node = i;
414
415 spin_lock_init(&port->lock);
416 SET_MODULE_OWNER(dev);
417 dev->irq = card->irq;
418 dev->mem_start = ramphys;
419 dev->mem_end = ramphys + ramsize - 1;
420 dev->tx_queue_len = 50;
421 dev->do_ioctl = pci200_ioctl;
422 dev->open = pci200_open;
423 dev->stop = pci200_close;
424 hdlc->attach = sca_attach;
425 hdlc->xmit = sca_xmit;
426 port->settings.clock_type = CLOCK_EXT;
427 port->card = card;
428 if(register_hdlc_device(dev)) {
429 printk(KERN_ERR "pci200syn: unable to register hdlc "
430 "device\n");
431 port->card = NULL;
432 pci200_pci_remove_one(pdev);
433 return -ENOBUFS;
434 }
435 sca_init_sync_port(port); /* Set up SCA memory */
436
437 printk(KERN_INFO "%s: PCI200SYN node %d\n",
438 dev->name, port->phy_node);
439 }
440
441 sca_flush(card);
442 return 0;
443}
444
445
446
447static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
448 { PCI_VENDOR_ID_GORAMO, PCI_DEVICE_ID_PCI200SYN, PCI_ANY_ID,
449 PCI_ANY_ID, 0, 0, 0 },
450 { 0, }
451};
452
453
454static struct pci_driver pci200_pci_driver = {
455 .name = "PCI200SYN",
456 .id_table = pci200_pci_tbl,
457 .probe = pci200_pci_init_one,
458 .remove = pci200_pci_remove_one,
459};
460
461
462static int __init pci200_init_module(void)
463{
464#ifdef MODULE
465 printk(KERN_INFO "%s\n", version);
466#endif
467 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
468 printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
469 return -EINVAL;
470 }
471 return pci_module_init(&pci200_pci_driver);
472}
473
474
475
476static void __exit pci200_cleanup_module(void)
477{
478 pci_unregister_driver(&pci200_pci_driver);
479}
480
481MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
482MODULE_DESCRIPTION("Goramo PCI200SYN serial port driver");
483MODULE_LICENSE("GPL v2");
484MODULE_DEVICE_TABLE(pci, pci200_pci_tbl);
485module_param(pci_clock_freq, int, 0444);
486MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
487module_init(pci200_init_module);
488module_exit(pci200_cleanup_module);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
new file mode 100644
index 000000000000..db2c798ba89e
--- /dev/null
+++ b/drivers/net/wan/sbni.c
@@ -0,0 +1,1735 @@
1/* sbni.c: Granch SBNI12 leased line adapters driver for linux
2 *
3 * Written 2001 by Denis I.Timofeev (timofeev@granch.ru)
4 *
5 * Previous versions were written by Yaroslav Polyakov,
6 * Alexey Zverev and Max Khon.
7 *
8 * Driver supports SBNI12-02,-04,-05,-10,-11 cards, single and
9 * double-channel, PCI and ISA modifications.
10 * More information and useful utilities for working with SBNI12 cards can
11 * be found at http://www.granch.com (English) or http://www.granch.ru (Russian)
12 *
13 * This software may be used and distributed according to the terms
14 * of the GNU General Public License.
15 *
16 *
17 * 5.0.1 Jun 22 2001
18 * - Fixed bug in probe
19 * 5.0.0 Jun 06 2001
20 * - Driver was completely redesigned by Denis I.Timofeev,
21 * - now PCI/Dual, ISA/Dual (with single interrupt line) models are
22 * - supported
23 * 3.3.0 Thu Feb 24 21:30:28 NOVT 2000
24 * - PCI cards support
25 * 3.2.0 Mon Dec 13 22:26:53 NOVT 1999
26 * - Completely rebuilt all the packet storage system
27 * - to work in Ethernet-like style.
28 * 3.1.1 just fixed some bugs (5 aug 1999)
29 * 3.1.0 added balancing feature (26 apr 1999)
30 * 3.0.1 just fixed some bugs (14 apr 1999).
31 * 3.0.0 Initial Revision, Yaroslav Polyakov (24 Feb 1999)
32 * - added pre-calculation for CRC, fixed bug with "len-2" frames,
33 * - removed outbound fragmentation (MTU=1000), written CRC-calculation
34 * - on asm, added work with hard_headers and now we have our own cache
35 * - for them, optionally supported word-interchange on some chipsets,
36 *
37 * Known problem: this driver wasn't tested on multiprocessor machine.
38 */
39
40#include <linux/config.h>
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/ptrace.h>
44#include <linux/fcntl.h>
45#include <linux/ioport.h>
46#include <linux/interrupt.h>
47#include <linux/slab.h>
48#include <linux/string.h>
49#include <linux/errno.h>
50#include <linux/netdevice.h>
51#include <linux/etherdevice.h>
52#include <linux/pci.h>
53#include <linux/skbuff.h>
54#include <linux/timer.h>
55#include <linux/init.h>
56#include <linux/delay.h>
57
58#include <net/arp.h>
59
60#include <asm/io.h>
61#include <asm/types.h>
62#include <asm/byteorder.h>
63#include <asm/irq.h>
64#include <asm/uaccess.h>
65
66#include "sbni.h"
67
68/* device private data */
69
70struct net_local {
71 struct net_device_stats stats;
72 struct timer_list watchdog;
73
74 spinlock_t lock;
75 struct sk_buff *rx_buf_p; /* receive buffer ptr */
76 struct sk_buff *tx_buf_p; /* transmit buffer ptr */
77
78 unsigned int framelen; /* current frame length */
79 unsigned int maxframe; /* maximum valid frame length */
80 unsigned int state;
81 unsigned int inppos, outpos; /* positions in rx/tx buffers */
82
83 /* transmitting frame number - from frames qty to 1 */
84 unsigned int tx_frameno;
85
86 /* expected number of next receiving frame */
87 unsigned int wait_frameno;
88
89	/* count of failed attempts to send a frame - up to 32 attempts are made
90	   before an error, while the receiver on the other end of the wire tunes in */
91 unsigned int trans_errors;
92
93 /* idle time; send pong when limit exceeded */
94 unsigned int timer_ticks;
95
96 /* fields used for receive level autoselection */
97 int delta_rxl;
98 unsigned int cur_rxl_index, timeout_rxl;
99 unsigned long cur_rxl_rcvd, prev_rxl_rcvd;
100
101 struct sbni_csr1 csr1; /* current value of CSR1 */
102 struct sbni_in_stats in_stats; /* internal statistics */
103
104 struct net_device *second; /* for ISA/dual cards */
105
106#ifdef CONFIG_SBNI_MULTILINE
107 struct net_device *master;
108 struct net_device *link;
109#endif
110};
111
112
113static int sbni_card_probe( unsigned long );
114static int sbni_pci_probe( struct net_device * );
115static struct net_device *sbni_probe1(struct net_device *, unsigned long, int);
116static int sbni_open( struct net_device * );
117static int sbni_close( struct net_device * );
118static int sbni_start_xmit( struct sk_buff *, struct net_device * );
119static int sbni_ioctl( struct net_device *, struct ifreq *, int );
120static struct net_device_stats *sbni_get_stats( struct net_device * );
121static void set_multicast_list( struct net_device * );
122
123static irqreturn_t sbni_interrupt( int, void *, struct pt_regs * );
124static void handle_channel( struct net_device * );
125static int recv_frame( struct net_device * );
126static void send_frame( struct net_device * );
127static int upload_data( struct net_device *,
128 unsigned, unsigned, unsigned, u32 );
129static void download_data( struct net_device *, u32 * );
130static void sbni_watchdog( unsigned long );
131static void interpret_ack( struct net_device *, unsigned );
132static int append_frame_to_pkt( struct net_device *, unsigned, u32 );
133static void indicate_pkt( struct net_device * );
134static void card_start( struct net_device * );
135static void prepare_to_send( struct sk_buff *, struct net_device * );
136static void drop_xmit_queue( struct net_device * );
137static void send_frame_header( struct net_device *, u32 * );
138static int skip_tail( unsigned int, unsigned int, u32 );
139static int check_fhdr( u32, u32 *, u32 *, u32 *, u32 *, u32 * );
140static void change_level( struct net_device * );
141static void timeout_change_level( struct net_device * );
142static u32 calc_crc32( u32, u8 *, u32 );
143static struct sk_buff * get_rx_buf( struct net_device * );
144static int sbni_init( struct net_device * );
145
146#ifdef CONFIG_SBNI_MULTILINE
147static int enslave( struct net_device *, struct net_device * );
148static int emancipate( struct net_device * );
149#endif
150
151#ifdef __i386__
152#define ASM_CRC 1
153#endif
154
155static const char version[] =
156 "Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
157
158static int skip_pci_probe __initdata = 0;
159static int scandone __initdata = 0;
160static int num __initdata = 0;
161
162static unsigned char rxl_tab[];
163static u32 crc32tab[];
164
165/* A list of all installed devices, for removing the driver module. */
166static struct net_device *sbni_cards[ SBNI_MAX_NUM_CARDS ];
167
168/* Lists of device's parameters */
169static u32 io[ SBNI_MAX_NUM_CARDS ] __initdata =
170 { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
171static u32 irq[ SBNI_MAX_NUM_CARDS ] __initdata;
172static u32 baud[ SBNI_MAX_NUM_CARDS ] __initdata;
173static u32 rxl[ SBNI_MAX_NUM_CARDS ] __initdata =
174 { [0 ... SBNI_MAX_NUM_CARDS-1] = -1 };
175static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;
176
177#ifndef MODULE
178typedef u32 iarr[];
179static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac };
180#endif
181
182/* A zero-terminated list of I/O addresses to be probed on ISA bus */
183static unsigned int netcard_portlist[ ] __initdata = {
184 0x210, 0x214, 0x220, 0x224, 0x230, 0x234, 0x240, 0x244, 0x250, 0x254,
185 0x260, 0x264, 0x270, 0x274, 0x280, 0x284, 0x290, 0x294, 0x2a0, 0x2a4,
186 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
187 0 };
188
189
190/*
191 * Look for an SBNI card at the address stored in dev->base_addr, if nonzero.
192 * Otherwise, look through the PCI bus first; if no PCI card is found, scan ISA.
193 */
194
195static inline int __init
196sbni_isa_probe( struct net_device *dev )
197{
198 if( dev->base_addr > 0x1ff
199 && request_region( dev->base_addr, SBNI_IO_EXTENT, dev->name )
200 && sbni_probe1( dev, dev->base_addr, dev->irq ) )
201
202 return 0;
203 else {
204 printk( KERN_ERR "sbni: base address 0x%lx is busy, or adapter "
205 "is malfunctional!\n", dev->base_addr );
206 return -ENODEV;
207 }
208}
209
210static void __init sbni_devsetup(struct net_device *dev)
211{
212 ether_setup( dev );
213 dev->open = &sbni_open;
214 dev->stop = &sbni_close;
215 dev->hard_start_xmit = &sbni_start_xmit;
216 dev->get_stats = &sbni_get_stats;
217 dev->set_multicast_list = &set_multicast_list;
218 dev->do_ioctl = &sbni_ioctl;
219
220 SET_MODULE_OWNER( dev );
221}
222
223int __init sbni_probe(int unit)
224{
225 struct net_device *dev;
226 static unsigned version_printed __initdata = 0;
227 int err;
228
229 dev = alloc_netdev(sizeof(struct net_local), "sbni", sbni_devsetup);
230 if (!dev)
231 return -ENOMEM;
232
233 sprintf(dev->name, "sbni%d", unit);
234 netdev_boot_setup_check(dev);
235
236 err = sbni_init(dev);
237 if (err) {
238 free_netdev(dev);
239 return err;
240 }
241
242 err = register_netdev(dev);
243 if (err) {
244 release_region( dev->base_addr, SBNI_IO_EXTENT );
245 free_netdev(dev);
246 return err;
247 }
248 if( version_printed++ == 0 )
249 printk( KERN_INFO "%s", version );
250 return 0;
251}
252
253static int __init sbni_init(struct net_device *dev)
254{
255 int i;
256 if( dev->base_addr )
257 return sbni_isa_probe( dev );
258	/* otherwise we have to search for our adapter */
259
260 if( io[ num ] != -1 )
261 dev->base_addr = io[ num ],
262 dev->irq = irq[ num ];
263 else if( scandone || io[ 0 ] != -1 )
264 return -ENODEV;
265
266	/* if io[ num ] contains a non-zero address, it refers to an ISA-bus card */
267 if( dev->base_addr )
268 return sbni_isa_probe( dev );
269
270 /* ...otherwise - scan PCI first */
271 if( !skip_pci_probe && !sbni_pci_probe( dev ) )
272 return 0;
273
274 if( io[ num ] == -1 ) {
275		/* Auto-scan stops once the first ISA card has been found */
276 scandone = 1;
277 if( num > 0 )
278 return -ENODEV;
279 }
280
281 for( i = 0; netcard_portlist[ i ]; ++i ) {
282 int ioaddr = netcard_portlist[ i ];
283 if( request_region( ioaddr, SBNI_IO_EXTENT, dev->name )
284 && sbni_probe1( dev, ioaddr, 0 ))
285 return 0;
286 }
287
288 return -ENODEV;
289}
290
291
292int __init
293sbni_pci_probe( struct net_device *dev )
294{
295 struct pci_dev *pdev = NULL;
296
297 while( (pdev = pci_get_class( PCI_CLASS_NETWORK_OTHER << 8, pdev ))
298 != NULL ) {
299 int pci_irq_line;
300 unsigned long pci_ioaddr;
301 u16 subsys;
302
303 if( pdev->vendor != SBNI_PCI_VENDOR
304 && pdev->device != SBNI_PCI_DEVICE )
305 continue;
306
307 pci_ioaddr = pci_resource_start( pdev, 0 );
308 pci_irq_line = pdev->irq;
309
310		/* Skip cards already found in previous calls */
311 if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
312 pci_read_config_word( pdev, PCI_SUBSYSTEM_ID, &subsys );
313
314 if (subsys != 2)
315 continue;
316
317 /* Dual adapter is present */
318 if (!request_region(pci_ioaddr += 4, SBNI_IO_EXTENT,
319 dev->name ) )
320 continue;
321 }
322
323 if( pci_irq_line <= 0 || pci_irq_line >= NR_IRQS )
324 printk( KERN_WARNING " WARNING: The PCI BIOS assigned "
325 "this PCI card to IRQ %d, which is unlikely "
326				"to work!\n"
327 KERN_WARNING " You should use the PCI BIOS "
328 "setup to assign a valid IRQ line.\n",
329 pci_irq_line );
330
331		/* avoid re-enabling dual adapters */
332 if( (pci_ioaddr & 7) == 0 && pci_enable_device( pdev ) ) {
333 release_region( pci_ioaddr, SBNI_IO_EXTENT );
334 pci_dev_put( pdev );
335 return -EIO;
336 }
337 if( sbni_probe1( dev, pci_ioaddr, pci_irq_line ) ) {
338 SET_NETDEV_DEV(dev, &pdev->dev);
339 /* not the best thing to do, but this is all messed up
340 for hotplug systems anyway... */
341 pci_dev_put( pdev );
342 return 0;
343 }
344 }
345 return -ENODEV;
346}
347
348
349static struct net_device * __init
350sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
351{
352 struct net_local *nl;
353
354 if( sbni_card_probe( ioaddr ) ) {
355 release_region( ioaddr, SBNI_IO_EXTENT );
356 return NULL;
357 }
358
359 outb( 0, ioaddr + CSR0 );
360
361 if( irq < 2 ) {
362 unsigned long irq_mask;
363
364 irq_mask = probe_irq_on();
365 outb( EN_INT | TR_REQ, ioaddr + CSR0 );
366 outb( PR_RES, ioaddr + CSR1 );
367 mdelay(50);
368 irq = probe_irq_off(irq_mask);
369 outb( 0, ioaddr + CSR0 );
370
371 if( !irq ) {
372 printk( KERN_ERR "%s: can't detect device irq!\n",
373 dev->name );
374 release_region( ioaddr, SBNI_IO_EXTENT );
375 return NULL;
376 }
377 } else if( irq == 2 )
378 irq = 9;
379
380 dev->irq = irq;
381 dev->base_addr = ioaddr;
382
383	/* dev->priv was allocated by alloc_netdev(); fill in sbni-specific fields. */
384 nl = dev->priv;
385 if( !nl ) {
386 printk( KERN_ERR "%s: unable to get memory!\n", dev->name );
387 release_region( ioaddr, SBNI_IO_EXTENT );
388 return NULL;
389 }
390
391 dev->priv = nl;
392 memset( nl, 0, sizeof(struct net_local) );
393 spin_lock_init( &nl->lock );
394
395 /* store MAC address (generate if that isn't known) */
396 *(u16 *)dev->dev_addr = htons( 0x00ff );
397 *(u32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
398 ( (mac[num] ? mac[num] : (u32)((long)dev->priv)) & 0x00ffffff) );
399
400 /* store link settings (speed, receive level ) */
401 nl->maxframe = DEFAULT_FRAME_LEN;
402 nl->csr1.rate = baud[ num ];
403
404 if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
405 /* autotune rxl */
406 nl->cur_rxl_index = DEF_RXL,
407 nl->delta_rxl = DEF_RXL_DELTA;
408 else
409 nl->delta_rxl = 0;
410 nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
411 if( inb( ioaddr + CSR0 ) & 0x01 )
412 nl->state |= FL_SLOW_MODE;
413
414 printk( KERN_NOTICE "%s: ioaddr %#lx, irq %d, "
415 "MAC: 00:ff:01:%02x:%02x:%02x\n",
416 dev->name, dev->base_addr, dev->irq,
417 ((u8 *) dev->dev_addr) [3],
418 ((u8 *) dev->dev_addr) [4],
419 ((u8 *) dev->dev_addr) [5] );
420
421 printk( KERN_NOTICE "%s: speed %d, receive level ", dev->name,
422 ( (nl->state & FL_SLOW_MODE) ? 500000 : 2000000)
423 / (1 << nl->csr1.rate) );
424
425 if( nl->delta_rxl == 0 )
426 printk( "0x%x (fixed)\n", nl->cur_rxl_index );
427 else
428 printk( "(auto)\n");
429
430#ifdef CONFIG_SBNI_MULTILINE
431 nl->master = dev;
432 nl->link = NULL;
433#endif
434
435 sbni_cards[ num++ ] = dev;
436 return dev;
437}
438
439/* -------------------------------------------------------------------------- */
440
441#ifdef CONFIG_SBNI_MULTILINE
442
443static int
444sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
445{
446 struct net_device *p;
447
448 netif_stop_queue( dev );
449
450 /* Looking for idle device in the list */
451 for( p = dev; p; ) {
452 struct net_local *nl = (struct net_local *) p->priv;
453 spin_lock( &nl->lock );
454 if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
455 p = nl->link;
456 spin_unlock( &nl->lock );
457 } else {
458 /* Idle dev is found */
459 prepare_to_send( skb, p );
460 spin_unlock( &nl->lock );
461 netif_start_queue( dev );
462 return 0;
463 }
464 }
465
466 return 1;
467}
468
469#else /* CONFIG_SBNI_MULTILINE */
470
471static int
472sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
473{
474 struct net_local *nl = (struct net_local *) dev->priv;
475
476 netif_stop_queue( dev );
477 spin_lock( &nl->lock );
478
479 prepare_to_send( skb, dev );
480
481 spin_unlock( &nl->lock );
482 return 0;
483}
484
485#endif /* CONFIG_SBNI_MULTILINE */
486
487/* -------------------------------------------------------------------------- */
488
489/* interrupt handler */
490
491/*
492 * SBNI12D-10, -11/ISA boards in "common interrupt" mode cannot be treated
493 * as two independent single-channel devices.  Each channel looks like an
494 * Ethernet interface, but the interrupt handler must be shared.  In fact,
495 * only the first ("master") channel's driver registers the handler; its
496 * struct net_local holds a pointer to the "slave" channel's struct
497 * net_local, and it services that channel's interrupts as well.
498 * The dev of every successfully attached ISA SBNI board is linked into a
499 * list.  When the next board's driver initializes, it scans this list; if
500 * it finds a dev with the same irq and an ioaddr differing by 4, it
501 * assumes that board is the "master".
502 */
503
504static irqreturn_t
505sbni_interrupt( int irq, void *dev_id, struct pt_regs *regs )
506{
507 struct net_device *dev = (struct net_device *) dev_id;
508 struct net_local *nl = (struct net_local *) dev->priv;
509 int repeat;
510
511 spin_lock( &nl->lock );
512 if( nl->second )
513 spin_lock( &((struct net_local *) nl->second->priv)->lock );
514
515 do {
516 repeat = 0;
517 if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
518 handle_channel( dev ),
519 repeat = 1;
520 if( nl->second && /* second channel present */
521 (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
522 handle_channel( nl->second ),
523 repeat = 1;
524 } while( repeat );
525
526 if( nl->second )
527 spin_unlock( &((struct net_local *)nl->second->priv)->lock );
528 spin_unlock( &nl->lock );
529 return IRQ_HANDLED;
530}
531
532
533static void
534handle_channel( struct net_device *dev )
535{
536 struct net_local *nl = (struct net_local *) dev->priv;
537 unsigned long ioaddr = dev->base_addr;
538
539 int req_ans;
540 unsigned char csr0;
541
542#ifdef CONFIG_SBNI_MULTILINE
543 /* Lock the master device because we going to change its local data */
544 if( nl->state & FL_SLAVE )
545 spin_lock( &((struct net_local *) nl->master->priv)->lock );
546#endif
547
548 outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
549
550 nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
551 for(;;) {
552 csr0 = inb( ioaddr + CSR0 );
553 if( ( csr0 & (RC_RDY | TR_RDY) ) == 0 )
554 break;
555
556 req_ans = !(nl->state & FL_PREV_OK);
557
558 if( csr0 & RC_RDY )
559 req_ans = recv_frame( dev );
560
561 /*
562 * TR_RDY always equals 1 here because we have owned the marker,
563		 * and we set TR_REQ while interrupts were disabled
564 */
565 csr0 = inb( ioaddr + CSR0 );
566 if( !(csr0 & TR_RDY) || (csr0 & RC_RDY) )
567 printk( KERN_ERR "%s: internal error!\n", dev->name );
568
569 /* if state & FL_NEED_RESEND != 0 then tx_frameno != 0 */
570 if( req_ans || nl->tx_frameno != 0 )
571 send_frame( dev );
572 else
573 /* send marker without any data */
574 outb( inb( ioaddr + CSR0 ) & ~TR_REQ, ioaddr + CSR0 );
575 }
576
577 outb( inb( ioaddr + CSR0 ) | EN_INT, ioaddr + CSR0 );
578
579#ifdef CONFIG_SBNI_MULTILINE
580 if( nl->state & FL_SLAVE )
581 spin_unlock( &((struct net_local *) nl->master->priv)->lock );
582#endif
583}
584
585
586/*
587 * The routine returns 1 if the received frame needs to be acknowledged.
588 * An empty frame received without errors is not acknowledged.
589 */
590
591static int
592recv_frame( struct net_device *dev )
593{
594 struct net_local *nl = (struct net_local *) dev->priv;
595 unsigned long ioaddr = dev->base_addr;
596
597 u32 crc = CRC32_INITIAL;
598
599 unsigned framelen, frameno, ack;
600 unsigned is_first, frame_ok;
601
602 if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
603 frame_ok = framelen > 4
604 ? upload_data( dev, framelen, frameno, is_first, crc )
605 : skip_tail( ioaddr, framelen, crc );
606 if( frame_ok )
607 interpret_ack( dev, ack );
608 } else
609 frame_ok = 0;
610
611 outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
612 if( frame_ok ) {
613 nl->state |= FL_PREV_OK;
614 if( framelen > 4 )
615 nl->in_stats.all_rx_number++;
616 } else
617 nl->state &= ~FL_PREV_OK,
618 change_level( dev ),
619 nl->in_stats.all_rx_number++,
620 nl->in_stats.bad_rx_number++;
621
622 return !frame_ok || framelen > 4;
623}
624
625
626static void
627send_frame( struct net_device *dev )
628{
629 struct net_local *nl = (struct net_local *) dev->priv;
630
631 u32 crc = CRC32_INITIAL;
632
633 if( nl->state & FL_NEED_RESEND ) {
634
635		/* if the frame was sent but not ACK'ed - resend it */
636 if( nl->trans_errors ) {
637 --nl->trans_errors;
638 if( nl->framelen != 0 )
639 nl->in_stats.resend_tx_number++;
640 } else {
641			/* could not transmit even after many attempts */
642#ifdef CONFIG_SBNI_MULTILINE
643 if( (nl->state & FL_SLAVE) || nl->link )
644#endif
645 nl->state |= FL_LINE_DOWN;
646 drop_xmit_queue( dev );
647 goto do_send;
648 }
649 } else
650 nl->trans_errors = TR_ERROR_COUNT;
651
652 send_frame_header( dev, &crc );
653 nl->state |= FL_NEED_RESEND;
654 /*
655	 * FL_NEED_RESEND will be cleared after an ACK; if an empty frame was
656	 * sent, it is cleared in prepare_to_send() for the next frame instead
657 */
658
659
660 if( nl->framelen ) {
661 download_data( dev, &crc );
662 nl->in_stats.all_tx_number++;
663 nl->state |= FL_WAIT_ACK;
664 }
665
666 outsb( dev->base_addr + DAT, (u8 *)&crc, sizeof crc );
667
668do_send:
669 outb( inb( dev->base_addr + CSR0 ) & ~TR_REQ, dev->base_addr + CSR0 );
670
671 if( nl->tx_frameno )
672 /* next frame exists - we request card to send it */
673 outb( inb( dev->base_addr + CSR0 ) | TR_REQ,
674 dev->base_addr + CSR0 );
675}
676
677
678/*
679 * Write the frame data into adapter's buffer memory, and calculate CRC.
680 * Do padding if necessary.
681 */
682
683static void
684download_data( struct net_device *dev, u32 *crc_p )
685{
686 struct net_local *nl = (struct net_local *) dev->priv;
687 struct sk_buff *skb = nl->tx_buf_p;
688
689 unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
690
691 outsb( dev->base_addr + DAT, skb->data + nl->outpos, len );
692 *crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
693
694	/* if the packet is too short, write some more bytes to pad it */
695 for( len = nl->framelen - len; len--; )
696 outb( 0, dev->base_addr + DAT ),
697 *crc_p = CRC32( 0, *crc_p );
698}
699
700
701static int
702upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
703 unsigned is_first, u32 crc )
704{
705 struct net_local *nl = (struct net_local *) dev->priv;
706
707 int frame_ok;
708
709 if( is_first )
710 nl->wait_frameno = frameno,
711 nl->inppos = 0;
712
713 if( nl->wait_frameno == frameno ) {
714
715 if( nl->inppos + framelen <= ETHER_MAX_LEN )
716 frame_ok = append_frame_to_pkt( dev, framelen, crc );
717
718 /*
719		 * if the CRC is right but framelen is incorrect then a
720		 * transmitter error occurred... drop the entire packet
721 */
722 else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
723 != 0 )
724 nl->wait_frameno = 0,
725 nl->inppos = 0,
726#ifdef CONFIG_SBNI_MULTILINE
727 ((struct net_local *) nl->master->priv)
728 ->stats.rx_errors++,
729 ((struct net_local *) nl->master->priv)
730 ->stats.rx_missed_errors++;
731#else
732 nl->stats.rx_errors++,
733 nl->stats.rx_missed_errors++;
734#endif
735 /* now skip all frames until is_first != 0 */
736 } else
737 frame_ok = skip_tail( dev->base_addr, framelen, crc );
738
739 if( is_first && !frame_ok )
740 /*
741 * Frame has been broken, but we had already stored
742 * is_first... Drop entire packet.
743 */
744 nl->wait_frameno = 0,
745#ifdef CONFIG_SBNI_MULTILINE
746 ((struct net_local *) nl->master->priv)->stats.rx_errors++,
747 ((struct net_local *) nl->master->priv)->stats.rx_crc_errors++;
748#else
749 nl->stats.rx_errors++,
750 nl->stats.rx_crc_errors++;
751#endif
752
753 return frame_ok;
754}
755
756
757static __inline void
758send_complete( struct net_local *nl )
759{
760#ifdef CONFIG_SBNI_MULTILINE
761 ((struct net_local *) nl->master->priv)->stats.tx_packets++;
762 ((struct net_local *) nl->master->priv)->stats.tx_bytes
763 += nl->tx_buf_p->len;
764#else
765 nl->stats.tx_packets++;
766 nl->stats.tx_bytes += nl->tx_buf_p->len;
767#endif
768 dev_kfree_skb_irq( nl->tx_buf_p );
769
770 nl->tx_buf_p = NULL;
771
772 nl->outpos = 0;
773 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
774 nl->framelen = 0;
775}
776
777
778static void
779interpret_ack( struct net_device *dev, unsigned ack )
780{
781 struct net_local *nl = (struct net_local *) dev->priv;
782
783 if( ack == FRAME_SENT_OK ) {
784 nl->state &= ~FL_NEED_RESEND;
785
786 if( nl->state & FL_WAIT_ACK ) {
787 nl->outpos += nl->framelen;
788
789 if( --nl->tx_frameno )
790 nl->framelen = min_t(unsigned int,
791 nl->maxframe,
792 nl->tx_buf_p->len - nl->outpos);
793 else
794 send_complete( nl ),
795#ifdef CONFIG_SBNI_MULTILINE
796 netif_wake_queue( nl->master );
797#else
798 netif_wake_queue( dev );
799#endif
800 }
801 }
802
803 nl->state &= ~FL_WAIT_ACK;
804}
805
806
807/*
808 * Glue the received frame to the previous fragments of the packet.
809 * Indicate the packet once the last frame has been accepted.
810 */
811
812static int
813append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
814{
815 struct net_local *nl = (struct net_local *) dev->priv;
816
817 u8 *p;
818
819 if( nl->inppos + framelen > ETHER_MAX_LEN )
820 return 0;
821
822 if( !nl->rx_buf_p && !(nl->rx_buf_p = get_rx_buf( dev )) )
823 return 0;
824
825 p = nl->rx_buf_p->data + nl->inppos;
826 insb( dev->base_addr + DAT, p, framelen );
827 if( calc_crc32( crc, p, framelen ) != CRC32_REMAINDER )
828 return 0;
829
830 nl->inppos += framelen - 4;
831 if( --nl->wait_frameno == 0 ) /* last frame received */
832 indicate_pkt( dev );
833
834 return 1;
835}
836
837
838/*
839 * Prepare to start output on the adapter.
840 * The transmitter is actually activated when the marker is accepted.
841 */
842
843static void
844prepare_to_send( struct sk_buff *skb, struct net_device *dev )
845{
846 struct net_local *nl = (struct net_local *) dev->priv;
847
848 unsigned int len;
849
850 /* nl->tx_buf_p == NULL here! */
851 if( nl->tx_buf_p )
852 printk( KERN_ERR "%s: memory leak!\n", dev->name );
853
854 nl->outpos = 0;
855 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
856
857 len = skb->len;
858 if( len < SBNI_MIN_LEN )
859 len = SBNI_MIN_LEN;
860
861 nl->tx_buf_p = skb;
862 nl->tx_frameno = (len + nl->maxframe - 1) / nl->maxframe;
863 nl->framelen = len < nl->maxframe ? len : nl->maxframe;
864
865 outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
866#ifdef CONFIG_SBNI_MULTILINE
867 nl->master->trans_start = jiffies;
868#else
869 dev->trans_start = jiffies;
870#endif
871}
872
873
874static void
875drop_xmit_queue( struct net_device *dev )
876{
877 struct net_local *nl = (struct net_local *) dev->priv;
878
879 if( nl->tx_buf_p )
880 dev_kfree_skb_any( nl->tx_buf_p ),
881 nl->tx_buf_p = NULL,
882#ifdef CONFIG_SBNI_MULTILINE
883 ((struct net_local *) nl->master->priv)
884 ->stats.tx_errors++,
885 ((struct net_local *) nl->master->priv)
886 ->stats.tx_carrier_errors++;
887#else
888 nl->stats.tx_errors++,
889 nl->stats.tx_carrier_errors++;
890#endif
891
892 nl->tx_frameno = 0;
893 nl->framelen = 0;
894 nl->outpos = 0;
895 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
896#ifdef CONFIG_SBNI_MULTILINE
897 netif_start_queue( nl->master );
898 nl->master->trans_start = jiffies;
899#else
900 netif_start_queue( dev );
901 dev->trans_start = jiffies;
902#endif
903}
904
905
906static void
907send_frame_header( struct net_device *dev, u32 *crc_p )
908{
909 struct net_local *nl = (struct net_local *) dev->priv;
910
911 u32 crc = *crc_p;
912 u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
913 u8 value;
914
915 if( nl->state & FL_NEED_RESEND )
916 len_field |= FRAME_RETRY; /* non-first attempt... */
917
918 if( nl->outpos == 0 )
919 len_field |= FRAME_FIRST;
920
921 len_field |= (nl->state & FL_PREV_OK) ? FRAME_SENT_OK : FRAME_SENT_BAD;
922 outb( SBNI_SIG, dev->base_addr + DAT );
923
924 value = (u8) len_field;
925 outb( value, dev->base_addr + DAT );
926 crc = CRC32( value, crc );
927 value = (u8) (len_field >> 8);
928 outb( value, dev->base_addr + DAT );
929 crc = CRC32( value, crc );
930
931 outb( nl->tx_frameno, dev->base_addr + DAT );
932 crc = CRC32( nl->tx_frameno, crc );
933 outb( 0, dev->base_addr + DAT );
934 crc = CRC32( 0, crc );
935 *crc_p = crc;
936}
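/*
 * On-wire frame layout, as implied by send_frame_header() above and
 * check_fhdr() below (a sketch derived from this code, not a spec):
 *
 *	byte 0		SBNI_SIG (0x5a)
 *	bytes 1-2	length field, low byte first: framelen + 6
 *			(CRC + frameno + reserved), OR'ed with the
 *			FRAME_FIRST/FRAME_RETRY and ack flags
 *	byte 3		frame number
 *	byte 4		reserved (zero)
 *	...		payload
 *	last 4 bytes	CRC-32 accumulated over bytes 1 onwards
 */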
937
938
939/*
940 * If the frame tail is not needed (incorrect frame number, or received
941 * twice), it is not stored, but the CRC is still calculated
942 */
943
944static int
945skip_tail( unsigned int ioaddr, unsigned int tail_len, u32 crc )
946{
947 while( tail_len-- )
948 crc = CRC32( inb( ioaddr + DAT ), crc );
949
950 return crc == CRC32_REMAINDER;
951}
952
953
954/*
955 * Preliminary check that the frame header is correct; calculates its CRC
956 * and splits it into its simple fields
957 */
958
959static int
960check_fhdr( u32 ioaddr, u32 *framelen, u32 *frameno, u32 *ack,
961 u32 *is_first, u32 *crc_p )
962{
963 u32 crc = *crc_p;
964 u8 value;
965
966 if( inb( ioaddr + DAT ) != SBNI_SIG )
967 return 0;
968
969 value = inb( ioaddr + DAT );
970 *framelen = (u32)value;
971 crc = CRC32( value, crc );
972 value = inb( ioaddr + DAT );
973 *framelen |= ((u32)value) << 8;
974 crc = CRC32( value, crc );
975
976 *ack = *framelen & FRAME_ACK_MASK;
977 *is_first = (*framelen & FRAME_FIRST) != 0;
978
979 if( (*framelen &= FRAME_LEN_MASK) < 6
980 || *framelen > SBNI_MAX_FRAME - 3 )
981 return 0;
982
983 value = inb( ioaddr + DAT );
984 *frameno = (u32)value;
985 crc = CRC32( value, crc );
986
987 crc = CRC32( inb( ioaddr + DAT ), crc ); /* reserved byte */
988 *framelen -= 2;
989
990 *crc_p = crc;
991 return 1;
992}
993
994
995static struct sk_buff *
996get_rx_buf( struct net_device *dev )
997{
998 /* +2 is to compensate for the alignment fixup below */
999 struct sk_buff *skb = dev_alloc_skb( ETHER_MAX_LEN + 2 );
1000 if( !skb )
1001 return NULL;
1002
1003#ifdef CONFIG_SBNI_MULTILINE
1004 skb->dev = ((struct net_local *) dev->priv)->master;
1005#else
1006 skb->dev = dev;
1007#endif
1008 skb_reserve( skb, 2 ); /* Align IP on longword boundaries */
1009 return skb;
1010}
1011
1012
1013static void
1014indicate_pkt( struct net_device *dev )
1015{
1016 struct net_local *nl = (struct net_local *) dev->priv;
1017 struct sk_buff *skb = nl->rx_buf_p;
1018
1019 skb_put( skb, nl->inppos );
1020
1021#ifdef CONFIG_SBNI_MULTILINE
1022 skb->protocol = eth_type_trans( skb, nl->master );
1023 netif_rx( skb );
1024 dev->last_rx = jiffies;
1025 ++((struct net_local *) nl->master->priv)->stats.rx_packets;
1026 ((struct net_local *) nl->master->priv)->stats.rx_bytes += nl->inppos;
1027#else
1028 skb->protocol = eth_type_trans( skb, dev );
1029 netif_rx( skb );
1030 dev->last_rx = jiffies;
1031 ++nl->stats.rx_packets;
1032 nl->stats.rx_bytes += nl->inppos;
1033#endif
1034 nl->rx_buf_p = NULL; /* protocol driver will clear this sk_buff */
1035}
1036
1037
1038/* -------------------------------------------------------------------------- */
1039
1040/*
1041 * Periodically checks wire activity and regenerates the marker if the
1042 * connection has been inactive for a long time.
1043 */
1044
1045static void
1046sbni_watchdog( unsigned long arg )
1047{
1048 struct net_device *dev = (struct net_device *) arg;
1049 struct net_local *nl = (struct net_local *) dev->priv;
1050 struct timer_list *w = &nl->watchdog;
1051 unsigned long flags;
1052 unsigned char csr0;
1053
1054 spin_lock_irqsave( &nl->lock, flags );
1055
1056 csr0 = inb( dev->base_addr + CSR0 );
1057 if( csr0 & RC_CHK ) {
1058
1059 if( nl->timer_ticks ) {
1060 if( csr0 & (RC_RDY | BU_EMP) )
1061 /* receiving not active */
1062 nl->timer_ticks--;
1063 } else {
1064 nl->in_stats.timeout_number++;
1065 if( nl->delta_rxl )
1066 timeout_change_level( dev );
1067
1068 outb( *(u_char *)&nl->csr1 | PR_RES,
1069 dev->base_addr + CSR1 );
1070 csr0 = inb( dev->base_addr + CSR0 );
1071 }
1072 } else
1073 nl->state &= ~FL_LINE_DOWN;
1074
1075 outb( csr0 | RC_CHK, dev->base_addr + CSR0 );
1076
1077 init_timer( w );
1078 w->expires = jiffies + SBNI_TIMEOUT;
1079 w->data = arg;
1080 w->function = sbni_watchdog;
1081 add_timer( w );
1082
1083 spin_unlock_irqrestore( &nl->lock, flags );
1084}
1085
1086
1087static unsigned char rxl_tab[] = {
1088 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x08,
1089 0x0a, 0x0c, 0x0f, 0x16, 0x18, 0x1a, 0x1c, 0x1f
1090};
1091
1092#define SIZE_OF_TIMEOUT_RXL_TAB 4
1093static unsigned char timeout_rxl_tab[] = {
1094 0x03, 0x05, 0x08, 0x0b
1095};
1096
1097/* -------------------------------------------------------------------------- */
1098
1099static void
1100card_start( struct net_device *dev )
1101{
1102 struct net_local *nl = (struct net_local *) dev->priv;
1103
1104 nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
1105 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
1106 nl->state |= FL_PREV_OK;
1107
1108 nl->inppos = nl->outpos = 0;
1109 nl->wait_frameno = 0;
1110 nl->tx_frameno = 0;
1111 nl->framelen = 0;
1112
1113 outb( *(u_char *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
1114 outb( EN_INT, dev->base_addr + CSR0 );
1115}
1116
1117/* -------------------------------------------------------------------------- */
1118
1119/* Receive level auto-selection */
1120
1121static void
1122change_level( struct net_device *dev )
1123{
1124 struct net_local *nl = (struct net_local *) dev->priv;
1125
1126 if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
1127 return;
1128
1129 if( nl->cur_rxl_index == 0 )
1130 nl->delta_rxl = 1;
1131 else if( nl->cur_rxl_index == 15 )
1132 nl->delta_rxl = -1;
1133 else if( nl->cur_rxl_rcvd < nl->prev_rxl_rcvd )
1134 nl->delta_rxl = -nl->delta_rxl;
1135
1136 nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index += nl->delta_rxl ];
1137	inb( dev->base_addr + CSR0 );	/* needed for PCI cards */
1138 outb( *(u8 *)&nl->csr1, dev->base_addr + CSR1 );
1139
1140 nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
1141 nl->cur_rxl_rcvd = 0;
1142}
1143
1144
1145static void
1146timeout_change_level( struct net_device *dev )
1147{
1148 struct net_local *nl = (struct net_local *) dev->priv;
1149
1150 nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
1151 if( ++nl->timeout_rxl >= 4 )
1152 nl->timeout_rxl = 0;
1153
1154 nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
1155 inb( dev->base_addr + CSR0 );
1156 outb( *(unsigned char *)&nl->csr1, dev->base_addr + CSR1 );
1157
1158 nl->prev_rxl_rcvd = nl->cur_rxl_rcvd;
1159 nl->cur_rxl_rcvd = 0;
1160}
1161
1162/* -------------------------------------------------------------------------- */
1163
1164/*
1165 * Open/initialize the board.
1166 */
1167
1168static int
1169sbni_open( struct net_device *dev )
1170{
1171 struct net_local *nl = (struct net_local *) dev->priv;
1172 struct timer_list *w = &nl->watchdog;
1173
1174 /*
1175	 * For dual ISA adapters in "common irq" mode, we have to determine
1176	 * whether the primary or the secondary channel is being initialized,
1177	 * and register the irq handler only in the first case.
1178 */
1179 if( dev->base_addr < 0x400 ) { /* ISA only */
1180 struct net_device **p = sbni_cards;
1181 for( ; *p && p < sbni_cards + SBNI_MAX_NUM_CARDS; ++p )
1182 if( (*p)->irq == dev->irq
1183 && ((*p)->base_addr == dev->base_addr + 4
1184 || (*p)->base_addr == dev->base_addr - 4)
1185 && (*p)->flags & IFF_UP ) {
1186
1187 ((struct net_local *) ((*p)->priv))
1188 ->second = dev;
1189 printk( KERN_NOTICE "%s: using shared irq "
1190 "with %s\n", dev->name, (*p)->name );
1191 nl->state |= FL_SECONDARY;
1192 goto handler_attached;
1193 }
1194 }
1195
1196 if( request_irq(dev->irq, sbni_interrupt, SA_SHIRQ, dev->name, dev) ) {
1197 printk( KERN_ERR "%s: unable to get IRQ %d.\n",
1198 dev->name, dev->irq );
1199 return -EAGAIN;
1200 }
1201
1202handler_attached:
1203
1204 spin_lock( &nl->lock );
1205 memset( &nl->stats, 0, sizeof(struct net_device_stats) );
1206 memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
1207
1208 card_start( dev );
1209
1210 netif_start_queue( dev );
1211
1212 /* set timer watchdog */
1213 init_timer( w );
1214 w->expires = jiffies + SBNI_TIMEOUT;
1215 w->data = (unsigned long) dev;
1216 w->function = sbni_watchdog;
1217 add_timer( w );
1218
1219 spin_unlock( &nl->lock );
1220 return 0;
1221}
1222
1223
1224static int
1225sbni_close( struct net_device *dev )
1226{
1227 struct net_local *nl = (struct net_local *) dev->priv;
1228
1229 if( nl->second && nl->second->flags & IFF_UP ) {
1230 printk( KERN_NOTICE "Secondary channel (%s) is active!\n",
1231 nl->second->name );
1232 return -EBUSY;
1233 }
1234
1235#ifdef CONFIG_SBNI_MULTILINE
1236 if( nl->state & FL_SLAVE )
1237 emancipate( dev );
1238 else
1239 while( nl->link ) /* it's master device! */
1240 emancipate( nl->link );
1241#endif
1242
1243 spin_lock( &nl->lock );
1244
1245 nl->second = NULL;
1246 drop_xmit_queue( dev );
1247 netif_stop_queue( dev );
1248
1249 del_timer( &nl->watchdog );
1250
1251 outb( 0, dev->base_addr + CSR0 );
1252
1253 if( !(nl->state & FL_SECONDARY) )
1254 free_irq( dev->irq, dev );
1255 nl->state &= FL_SECONDARY;
1256
1257 spin_unlock( &nl->lock );
1258 return 0;
1259}
1260
1261
1262/*
1263 Valid combinations in CSR0 (for probing):
1264
1265 VALID_DECODER 0000,0011,1011,1010
1266
1267 ; 0 ; -
1268 TR_REQ ; 1 ; +
1269 TR_RDY ; 2 ; -
1270 TR_RDY TR_REQ ; 3 ; +
1271 BU_EMP ; 4 ; +
1272 BU_EMP TR_REQ ; 5 ; +
1273 BU_EMP TR_RDY ; 6 ; -
1274 BU_EMP TR_RDY TR_REQ ; 7 ; +
1275 RC_RDY ; 8 ; +
1276 RC_RDY TR_REQ ; 9 ; +
1277 RC_RDY TR_RDY ; 10 ; -
1278 RC_RDY TR_RDY TR_REQ ; 11 ; -
1279 RC_RDY BU_EMP ; 12 ; -
1280 RC_RDY BU_EMP TR_REQ ; 13 ; -
1281 RC_RDY BU_EMP TR_RDY ; 14 ; -
1282 RC_RDY BU_EMP TR_RDY TR_REQ ; 15 ; -
1283*/
1284
1285#define VALID_DECODER (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200)
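/*
 * VALID_DECODER is a 16-entry bitmap indexed by the high nibble of CSR0
 * (see sbni_card_probe() below): bit n is set when combination n in the
 * table above is marked "+", i.e. bits 1, 3, 4, 5, 7, 8 and 9
 * (2 + 8 + 0x10 + 0x20 + 0x80 + 0x100 + 0x200).
 */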
1286
1287
1288static int
1289sbni_card_probe( unsigned long ioaddr )
1290{
1291 unsigned char csr0;
1292
1293 csr0 = inb( ioaddr + CSR0 );
1294 if( csr0 != 0xff && csr0 != 0x00 ) {
1295 csr0 &= ~EN_INT;
1296 if( csr0 & BU_EMP )
1297 csr0 |= EN_INT;
1298
1299 if( VALID_DECODER & (1 << (csr0 >> 4)) )
1300 return 0;
1301 }
1302
1303 return -ENODEV;
1304}
1305
1306/* -------------------------------------------------------------------------- */
1307
1308static int
1309sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
1310{
1311 struct net_local *nl = (struct net_local *) dev->priv;
1312 struct sbni_flags flags;
1313 int error = 0;
1314
1315#ifdef CONFIG_SBNI_MULTILINE
1316 struct net_device *slave_dev;
1317 char slave_name[ 8 ];
1318#endif
1319
1320 switch( cmd ) {
1321 case SIOCDEVGETINSTATS :
1322 if (copy_to_user( ifr->ifr_data, &nl->in_stats,
1323 sizeof(struct sbni_in_stats) ))
1324 error = -EFAULT;
1325 break;
1326
1327 case SIOCDEVRESINSTATS :
1328 if( current->euid != 0 ) /* root only */
1329 return -EPERM;
1330 memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) );
1331 break;
1332
1333 case SIOCDEVGHWSTATE :
1334 flags.mac_addr = *(u32 *)(dev->dev_addr + 3);
1335 flags.rate = nl->csr1.rate;
1336 flags.slow_mode = (nl->state & FL_SLOW_MODE) != 0;
1337 flags.rxl = nl->cur_rxl_index;
1338 flags.fixed_rxl = nl->delta_rxl == 0;
1339
1340 if (copy_to_user( ifr->ifr_data, &flags, sizeof flags ))
1341 error = -EFAULT;
1342 break;
1343
1344 case SIOCDEVSHWSTATE :
1345 if( current->euid != 0 ) /* root only */
1346 return -EPERM;
1347
1348 spin_lock( &nl->lock );
1349 flags = *(struct sbni_flags*) &ifr->ifr_ifru;
1350 if( flags.fixed_rxl )
1351 nl->delta_rxl = 0,
1352 nl->cur_rxl_index = flags.rxl;
1353 else
1354 nl->delta_rxl = DEF_RXL_DELTA,
1355 nl->cur_rxl_index = DEF_RXL;
1356
1357 nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
1358 nl->csr1.rate = flags.rate;
1359 outb( *(u8 *)&nl->csr1 | PR_RES, dev->base_addr + CSR1 );
1360 spin_unlock( &nl->lock );
1361 break;
1362
1363#ifdef CONFIG_SBNI_MULTILINE
1364
1365 case SIOCDEVENSLAVE :
1366 if( current->euid != 0 ) /* root only */
1367 return -EPERM;
1368
1369 if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name ))
1370 return -EFAULT;
1371 slave_dev = dev_get_by_name( slave_name );
1372 if( !slave_dev || !(slave_dev->flags & IFF_UP) ) {
1373 printk( KERN_ERR "%s: trying to enslave non-active "
1374 "device %s\n", dev->name, slave_name );
1375 return -EPERM;
1376 }
1377
1378 return enslave( dev, slave_dev );
1379
1380 case SIOCDEVEMANSIPATE :
1381 if( current->euid != 0 ) /* root only */
1382 return -EPERM;
1383
1384 return emancipate( dev );
1385
1386#endif /* CONFIG_SBNI_MULTILINE */
1387
1388 default :
1389 return -EOPNOTSUPP;
1390 }
1391
1392 return error;
1393}
1394
1395
1396#ifdef CONFIG_SBNI_MULTILINE
1397
1398static int
1399enslave( struct net_device *dev, struct net_device *slave_dev )
1400{
1401 struct net_local *nl = (struct net_local *) dev->priv;
1402 struct net_local *snl = (struct net_local *) slave_dev->priv;
1403
1404 if( nl->state & FL_SLAVE ) /* This isn't master or free device */
1405 return -EBUSY;
1406
1407 if( snl->state & FL_SLAVE ) /* That was already enslaved */
1408 return -EBUSY;
1409
1410 spin_lock( &nl->lock );
1411 spin_lock( &snl->lock );
1412
1413 /* append to list */
1414 snl->link = nl->link;
1415 nl->link = slave_dev;
1416 snl->master = dev;
1417 snl->state |= FL_SLAVE;
1418
1419 /* Summary statistics of MultiLine operation will be stored
1420 in master's counters */
1421 memset( &snl->stats, 0, sizeof(struct net_device_stats) );
1422 netif_stop_queue( slave_dev );
1423 netif_wake_queue( dev ); /* Now we are able to transmit */
1424
1425 spin_unlock( &snl->lock );
1426 spin_unlock( &nl->lock );
1427 printk( KERN_NOTICE "%s: slave device (%s) attached.\n",
1428 dev->name, slave_dev->name );
1429 return 0;
1430}
1431
1432
1433static int
1434emancipate( struct net_device *dev )
1435{
1436 struct net_local *snl = (struct net_local *) dev->priv;
1437 struct net_device *p = snl->master;
1438 struct net_local *nl = (struct net_local *) p->priv;
1439
1440 if( !(snl->state & FL_SLAVE) )
1441 return -EINVAL;
1442
1443 spin_lock( &nl->lock );
1444 spin_lock( &snl->lock );
1445 drop_xmit_queue( dev );
1446
1447 /* exclude from list */
1448 for(;;) { /* must be in list */
1449 struct net_local *t = (struct net_local *) p->priv;
1450 if( t->link == dev ) {
1451 t->link = snl->link;
1452 break;
1453 }
1454 p = t->link;
1455 }
1456
1457 snl->link = NULL;
1458 snl->master = dev;
1459 snl->state &= ~FL_SLAVE;
1460
1461 netif_start_queue( dev );
1462
1463 spin_unlock( &snl->lock );
1464 spin_unlock( &nl->lock );
1465
1466 dev_put( dev );
1467 return 0;
1468}
1469
1470#endif
1471
1472
1473static struct net_device_stats *
1474sbni_get_stats( struct net_device *dev )
1475{
1476 return &((struct net_local *) dev->priv)->stats;
1477}
1478
1479
1480static void
1481set_multicast_list( struct net_device *dev )
1482{
1483	return;		/* sbni always operates in promiscuous mode */
1484}
1485
1486
1487#ifdef MODULE
1488module_param_array(io, int, NULL, 0);
1489module_param_array(irq, int, NULL, 0);
1490module_param_array(baud, int, NULL, 0);
1491module_param_array(rxl, int, NULL, 0);
1492module_param_array(mac, int, NULL, 0);
1493module_param(skip_pci_probe, bool, 0);
1494
1495MODULE_LICENSE("GPL");
1496
1497
1498int
1499init_module( void )
1500{
1501 struct net_device *dev;
1502 int err;
1503
1504 while( num < SBNI_MAX_NUM_CARDS ) {
1505 dev = alloc_netdev(sizeof(struct net_local),
1506 "sbni%d", sbni_devsetup);
1507 if( !dev)
1508 break;
1509
1510 sprintf( dev->name, "sbni%d", num );
1511
1512 err = sbni_init(dev);
1513 if (err) {
1514 free_netdev(dev);
1515 break;
1516 }
1517
1518 if( register_netdev( dev ) ) {
1519 release_region( dev->base_addr, SBNI_IO_EXTENT );
1520 free_netdev( dev );
1521 break;
1522 }
1523 }
1524
1525 return *sbni_cards ? 0 : -ENODEV;
1526}
1527
1528void
1529cleanup_module( void )
1530{
1531 struct net_device *dev;
1532 int num;
1533
1534 for( num = 0; num < SBNI_MAX_NUM_CARDS; ++num )
1535 if( (dev = sbni_cards[ num ]) != NULL ) {
1536 unregister_netdev( dev );
1537 release_region( dev->base_addr, SBNI_IO_EXTENT );
1538 free_netdev( dev );
1539 }
1540}
1541
1542#else /* MODULE */
1543
1544static int __init
1545sbni_setup( char *p )
1546{
1547 int n, parm;
1548
1549 if( *p++ != '(' )
1550 goto bad_param;
1551
1552 for( n = 0, parm = 0; *p && n < 8; ) {
1553 (*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
1554 if( !*p || *p == ')' )
1555 return 1;
1556 if( *p == ';' )
1557 ++p, ++n, parm = 0;
1558 else if( *p++ != ',' )
1559 break;
1560 else
1561 if( ++parm >= 5 )
1562 break;
1563 }
1564bad_param:
1565 printk( KERN_ERR "Error in sbni kernel parameter!\n" );
1566 return 0;
1567}
1568
1569__setup( "sbni=", sbni_setup );
1570
1571#endif /* MODULE */
1572
1573/* -------------------------------------------------------------------------- */
1574
1575#ifdef ASM_CRC
1576
1577static u32
1578calc_crc32( u32 crc, u8 *p, u32 len )
1579{
1580 register u32 _crc;
1581 _crc = crc;
1582
1583 __asm__ __volatile__ (
1584 "xorl %%ebx, %%ebx\n"
1585 "movl %2, %%esi\n"
1586 "movl %3, %%ecx\n"
1587 "movl $crc32tab, %%edi\n"
1588 "shrl $2, %%ecx\n"
1589 "jz 1f\n"
1590
1591 ".align 4\n"
1592 "0:\n"
1593 "movb %%al, %%bl\n"
1594 "movl (%%esi), %%edx\n"
1595 "shrl $8, %%eax\n"
1596 "xorb %%dl, %%bl\n"
1597 "shrl $8, %%edx\n"
1598 "xorl (%%edi,%%ebx,4), %%eax\n"
1599
1600 "movb %%al, %%bl\n"
1601 "shrl $8, %%eax\n"
1602 "xorb %%dl, %%bl\n"
1603 "shrl $8, %%edx\n"
1604 "xorl (%%edi,%%ebx,4), %%eax\n"
1605
1606 "movb %%al, %%bl\n"
1607 "shrl $8, %%eax\n"
1608 "xorb %%dl, %%bl\n"
1609 "movb %%dh, %%dl\n"
1610 "xorl (%%edi,%%ebx,4), %%eax\n"
1611
1612 "movb %%al, %%bl\n"
1613 "shrl $8, %%eax\n"
1614 "xorb %%dl, %%bl\n"
1615 "addl $4, %%esi\n"
1616 "xorl (%%edi,%%ebx,4), %%eax\n"
1617
1618 "decl %%ecx\n"
1619 "jnz 0b\n"
1620
1621 "1:\n"
1622 "movl %3, %%ecx\n"
1623 "andl $3, %%ecx\n"
1624 "jz 2f\n"
1625
1626 "movb %%al, %%bl\n"
1627 "shrl $8, %%eax\n"
1628 "xorb (%%esi), %%bl\n"
1629 "xorl (%%edi,%%ebx,4), %%eax\n"
1630
1631 "decl %%ecx\n"
1632 "jz 2f\n"
1633
1634 "movb %%al, %%bl\n"
1635 "shrl $8, %%eax\n"
1636 "xorb 1(%%esi), %%bl\n"
1637 "xorl (%%edi,%%ebx,4), %%eax\n"
1638
1639 "decl %%ecx\n"
1640 "jz 2f\n"
1641
1642 "movb %%al, %%bl\n"
1643 "shrl $8, %%eax\n"
1644 "xorb 2(%%esi), %%bl\n"
1645 "xorl (%%edi,%%ebx,4), %%eax\n"
1646 "2:\n"
1647 : "=a" (_crc)
1648 : "0" (_crc), "g" (p), "g" (len)
1649 : "bx", "cx", "dx", "si", "di"
1650 );
1651
1652 return _crc;
1653}
1654
1655#else /* ASM_CRC */
1656
1657static u32
1658calc_crc32( u32 crc, u8 *p, u32 len )
1659{
1660 while( len-- )
1661 crc = CRC32( *p++, crc );
1662
1663 return crc;
1664}
1665
1666#endif /* ASM_CRC */
1667
1668
1669static u32 crc32tab[] __attribute__ ((aligned(8))) = {
1670 0xD202EF8D, 0xA505DF1B, 0x3C0C8EA1, 0x4B0BBE37,
1671 0xD56F2B94, 0xA2681B02, 0x3B614AB8, 0x4C667A2E,
1672 0xDCD967BF, 0xABDE5729, 0x32D70693, 0x45D03605,
1673 0xDBB4A3A6, 0xACB39330, 0x35BAC28A, 0x42BDF21C,
1674 0xCFB5FFE9, 0xB8B2CF7F, 0x21BB9EC5, 0x56BCAE53,
1675 0xC8D83BF0, 0xBFDF0B66, 0x26D65ADC, 0x51D16A4A,
1676 0xC16E77DB, 0xB669474D, 0x2F6016F7, 0x58672661,
1677 0xC603B3C2, 0xB1048354, 0x280DD2EE, 0x5F0AE278,
1678 0xE96CCF45, 0x9E6BFFD3, 0x0762AE69, 0x70659EFF,
1679 0xEE010B5C, 0x99063BCA, 0x000F6A70, 0x77085AE6,
1680 0xE7B74777, 0x90B077E1, 0x09B9265B, 0x7EBE16CD,
1681 0xE0DA836E, 0x97DDB3F8, 0x0ED4E242, 0x79D3D2D4,
1682 0xF4DBDF21, 0x83DCEFB7, 0x1AD5BE0D, 0x6DD28E9B,
1683 0xF3B61B38, 0x84B12BAE, 0x1DB87A14, 0x6ABF4A82,
1684 0xFA005713, 0x8D076785, 0x140E363F, 0x630906A9,
1685 0xFD6D930A, 0x8A6AA39C, 0x1363F226, 0x6464C2B0,
1686 0xA4DEAE1D, 0xD3D99E8B, 0x4AD0CF31, 0x3DD7FFA7,
1687 0xA3B36A04, 0xD4B45A92, 0x4DBD0B28, 0x3ABA3BBE,
1688 0xAA05262F, 0xDD0216B9, 0x440B4703, 0x330C7795,
1689 0xAD68E236, 0xDA6FD2A0, 0x4366831A, 0x3461B38C,
1690 0xB969BE79, 0xCE6E8EEF, 0x5767DF55, 0x2060EFC3,
1691 0xBE047A60, 0xC9034AF6, 0x500A1B4C, 0x270D2BDA,
1692 0xB7B2364B, 0xC0B506DD, 0x59BC5767, 0x2EBB67F1,
1693 0xB0DFF252, 0xC7D8C2C4, 0x5ED1937E, 0x29D6A3E8,
1694 0x9FB08ED5, 0xE8B7BE43, 0x71BEEFF9, 0x06B9DF6F,
1695 0x98DD4ACC, 0xEFDA7A5A, 0x76D32BE0, 0x01D41B76,
1696 0x916B06E7, 0xE66C3671, 0x7F6567CB, 0x0862575D,
1697 0x9606C2FE, 0xE101F268, 0x7808A3D2, 0x0F0F9344,
1698 0x82079EB1, 0xF500AE27, 0x6C09FF9D, 0x1B0ECF0B,
1699 0x856A5AA8, 0xF26D6A3E, 0x6B643B84, 0x1C630B12,
1700 0x8CDC1683, 0xFBDB2615, 0x62D277AF, 0x15D54739,
1701 0x8BB1D29A, 0xFCB6E20C, 0x65BFB3B6, 0x12B88320,
1702 0x3FBA6CAD, 0x48BD5C3B, 0xD1B40D81, 0xA6B33D17,
1703 0x38D7A8B4, 0x4FD09822, 0xD6D9C998, 0xA1DEF90E,
1704 0x3161E49F, 0x4666D409, 0xDF6F85B3, 0xA868B525,
1705 0x360C2086, 0x410B1010, 0xD80241AA, 0xAF05713C,
1706 0x220D7CC9, 0x550A4C5F, 0xCC031DE5, 0xBB042D73,
1707 0x2560B8D0, 0x52678846, 0xCB6ED9FC, 0xBC69E96A,
1708 0x2CD6F4FB, 0x5BD1C46D, 0xC2D895D7, 0xB5DFA541,
1709 0x2BBB30E2, 0x5CBC0074, 0xC5B551CE, 0xB2B26158,
1710 0x04D44C65, 0x73D37CF3, 0xEADA2D49, 0x9DDD1DDF,
1711 0x03B9887C, 0x74BEB8EA, 0xEDB7E950, 0x9AB0D9C6,
1712 0x0A0FC457, 0x7D08F4C1, 0xE401A57B, 0x930695ED,
1713 0x0D62004E, 0x7A6530D8, 0xE36C6162, 0x946B51F4,
1714 0x19635C01, 0x6E646C97, 0xF76D3D2D, 0x806A0DBB,
1715 0x1E0E9818, 0x6909A88E, 0xF000F934, 0x8707C9A2,
1716 0x17B8D433, 0x60BFE4A5, 0xF9B6B51F, 0x8EB18589,
1717 0x10D5102A, 0x67D220BC, 0xFEDB7106, 0x89DC4190,
1718 0x49662D3D, 0x3E611DAB, 0xA7684C11, 0xD06F7C87,
1719 0x4E0BE924, 0x390CD9B2, 0xA0058808, 0xD702B89E,
1720 0x47BDA50F, 0x30BA9599, 0xA9B3C423, 0xDEB4F4B5,
1721 0x40D06116, 0x37D75180, 0xAEDE003A, 0xD9D930AC,
1722 0x54D13D59, 0x23D60DCF, 0xBADF5C75, 0xCDD86CE3,
1723 0x53BCF940, 0x24BBC9D6, 0xBDB2986C, 0xCAB5A8FA,
1724 0x5A0AB56B, 0x2D0D85FD, 0xB404D447, 0xC303E4D1,
1725 0x5D677172, 0x2A6041E4, 0xB369105E, 0xC46E20C8,
1726 0x72080DF5, 0x050F3D63, 0x9C066CD9, 0xEB015C4F,
1727 0x7565C9EC, 0x0262F97A, 0x9B6BA8C0, 0xEC6C9856,
1728 0x7CD385C7, 0x0BD4B551, 0x92DDE4EB, 0xE5DAD47D,
1729 0x7BBE41DE, 0x0CB97148, 0x95B020F2, 0xE2B71064,
1730 0x6FBF1D91, 0x18B82D07, 0x81B17CBD, 0xF6B64C2B,
1731 0x68D2D988, 0x1FD5E91E, 0x86DCB8A4, 0xF1DB8832,
1732 0x616495A3, 0x1663A535, 0x8F6AF48F, 0xF86DC419,
1733 0x660951BA, 0x110E612C, 0x88073096, 0xFF000000
1734};
1735
diff --git a/drivers/net/wan/sbni.h b/drivers/net/wan/sbni.h
new file mode 100644
index 000000000000..27715e70f28b
--- /dev/null
+++ b/drivers/net/wan/sbni.h
@@ -0,0 +1,141 @@
1/* sbni.h: definitions for a Granch SBNI12 driver, version 5.0.0
2 * Written 2001 Denis I.Timofeev (timofeev@granch.ru)
3 * This file is distributed under the GNU GPL
4 */
5
6#ifndef SBNI_H
7#define SBNI_H
8
9#ifdef SBNI_DEBUG
10#define DP( A ) A
11#else
12#define DP( A )
13#endif
14
15
16/* We don't have official vendor id yet... */
17#define SBNI_PCI_VENDOR 0x55
18#define SBNI_PCI_DEVICE 0x9f
19
20#define ISA_MODE 0x00
21#define PCI_MODE 0x01
22
23#define SBNI_IO_EXTENT 4
24
25enum sbni_reg {
26 CSR0 = 0,
27 CSR1 = 1,
28 DAT = 2
29};
30
31/* CSR0 mapping */
32enum {
33 BU_EMP = 0x02,
34 RC_CHK = 0x04,
35 CT_ZER = 0x08,
36 TR_REQ = 0x10,
37 TR_RDY = 0x20,
38 EN_INT = 0x40,
39 RC_RDY = 0x80
40};
41
42
43/* CSR1 mapping */
44#define PR_RES 0x80
45
46struct sbni_csr1 {
47 unsigned rxl : 5;
48 unsigned rate : 2;
49 unsigned : 1;
50};
51
52/* fields in frame header */
53#define FRAME_ACK_MASK (unsigned short)0x7000
54#define FRAME_LEN_MASK (unsigned short)0x03FF
55#define FRAME_FIRST (unsigned short)0x8000
56#define FRAME_RETRY (unsigned short)0x0800
57
58#define FRAME_SENT_BAD (unsigned short)0x4000
59#define FRAME_SENT_OK (unsigned short)0x3000
60
61
62/* state flags */
63enum {
64 FL_WAIT_ACK = 0x01,
65 FL_NEED_RESEND = 0x02,
66 FL_PREV_OK = 0x04,
67 FL_SLOW_MODE = 0x08,
68 FL_SECONDARY = 0x10,
69#ifdef CONFIG_SBNI_MULTILINE
70 FL_SLAVE = 0x20,
71#endif
72 FL_LINE_DOWN = 0x40
73};
74
75
76enum {
77 DEFAULT_IOBASEADDR = 0x210,
78 DEFAULT_INTERRUPTNUMBER = 5,
79 DEFAULT_RATE = 0,
80 DEFAULT_FRAME_LEN = 1012
81};
82
83#define DEF_RXL_DELTA -1
84#define DEF_RXL 0xf
85
86#define SBNI_SIG 0x5a
87
88#define SBNI_MIN_LEN 60 /* Shortest Ethernet frame without FCS */
89#define SBNI_MAX_FRAME 1023
90#define ETHER_MAX_LEN 1518
91
92#define SBNI_TIMEOUT (HZ/10)
93
94#define TR_ERROR_COUNT 32
95#define CHANGE_LEVEL_START_TICKS 4
96
97#define SBNI_MAX_NUM_CARDS 16
98
99/* internal SBNI-specific statistics */
100struct sbni_in_stats {
101 u32 all_rx_number;
102 u32 bad_rx_number;
103 u32 timeout_number;
104 u32 all_tx_number;
105 u32 resend_tx_number;
106};
107
108/* SBNI ioctl params */
109#define SIOCDEVGETINSTATS SIOCDEVPRIVATE
110#define SIOCDEVRESINSTATS SIOCDEVPRIVATE+1
111#define SIOCDEVGHWSTATE SIOCDEVPRIVATE+2
112#define SIOCDEVSHWSTATE SIOCDEVPRIVATE+3
113#define SIOCDEVENSLAVE SIOCDEVPRIVATE+4
114#define SIOCDEVEMANSIPATE SIOCDEVPRIVATE+5
115
116
117/* data packet for SIOCDEVGHWSTATE/SIOCDEVSHWSTATE ioctl requests */
118struct sbni_flags {
119 u32 rxl : 4;
120 u32 rate : 2;
121 u32 fixed_rxl : 1;
122 u32 slow_mode : 1;
123 u32 mac_addr : 24;
124};
125
126/*
127 * CRC-32 stuff
128 */
129#define CRC32(c,crc) (crc32tab[((size_t)(crc) ^ (c)) & 0xff] ^ (((crc) >> 8) & 0x00FFFFFF))
130 /* CRC generator 0xEDB88320 */
131 /* CRC remainder 0x2144DF1C */
132 /* CRC initial value 0x00000000 */
133#define CRC32_REMAINDER 0x2144DF1C
134#define CRC32_INITIAL 0x00000000
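/*
 * Illustrative sketch (not part of the driver) of how the table-driven
 * macro above is typically folded over a buffer:
 *
 *	u32 crc = CRC32_INITIAL;
 *	for (i = 0; i < len; i++)
 *		crc = CRC32(buf[i], crc);
 *
 * Per the comments above, folding a frame together with its trailing CRC
 * should leave crc == CRC32_REMAINDER.
 */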
135
136#ifndef __initdata
137#define __initdata
138#endif
139
140#endif
141
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
new file mode 100644
index 000000000000..3ac9a45b20fa
--- /dev/null
+++ b/drivers/net/wan/sdla.c
@@ -0,0 +1,1676 @@
1/*
2 * SDLA An implementation of a driver for the Sangoma S502/S508 series
3 * multi-protocol PC interface card. Initial offering is with
4 * the DLCI driver, providing Frame Relay support for linux.
5 *
6 * Global definitions for the Frame relay interface.
7 *
8 * Version: @(#)sdla.c 0.30 12 Sep 1996
9 *
10 * Credits: Sangoma Technologies, for the use of 2 cards for an extended
11 * period of time.
12 * David Mandelstam <dm@sangoma.com> for getting me started on
13 * this project, and incentive to complete it.
14 * Gene Kozen <74604.152@compuserve.com> for providing me with
15 * important information about the cards.
16 *
17 * Author: Mike McLagan <mike.mclagan@linux.org>
18 *
19 * Changes:
20 * 0.15 Mike McLagan Improved error handling, packet dropping
21 * 0.20 Mike McLagan New transmit/receive flags for config
22 * If in FR mode, don't accept packets from
23 * non DLCI devices.
24 * 0.25 Mike McLagan Fixed problem with rejecting packets
25 * from non DLCI devices.
26 * 0.30 Mike McLagan Fixed kernel panic when used with modified
27 * ifconfig
28 *
29 * This program is free software; you can redistribute it and/or
30 * modify it under the terms of the GNU General Public License
31 * as published by the Free Software Foundation; either version
32 * 2 of the License, or (at your option) any later version.
33 */
34
35#include <linux/config.h> /* for CONFIG_DLCI_MAX */
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/types.h>
39#include <linux/fcntl.h>
40#include <linux/interrupt.h>
41#include <linux/ptrace.h>
42#include <linux/ioport.h>
43#include <linux/in.h>
44#include <linux/slab.h>
45#include <linux/string.h>
46#include <linux/timer.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/netdevice.h>
50#include <linux/skbuff.h>
51#include <linux/if_arp.h>
52#include <linux/if_frad.h>
53#include <linux/sdla.h>
54#include <linux/bitops.h>
55
56#include <asm/system.h>
57#include <asm/io.h>
58#include <asm/dma.h>
59#include <asm/uaccess.h>
60
61static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org";
62
63static unsigned int valid_port[] __initdata = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390};
64
65static unsigned int valid_mem[] __initdata = {
66 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
67 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000,
68 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
69 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000,
70 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000};
71
72static DEFINE_SPINLOCK(sdla_lock);
73
74/*********************************************************
75 *
76 * these are the core routines that access the card itself
77 *
78 *********************************************************/
79
80#define SDLA_WINDOW(dev,addr) outb((((addr) >> 13) & 0x1F), (dev)->base_addr + SDLA_REG_Z80_WINDOW)
81
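/*
 * Card memory is not mapped linearly: an 8 KB window of it is visible at
 * dev->mem_start, and SDLA_WINDOW() selects which 8 KB page (addr >> 13)
 * that window shows.  The helpers below therefore copy in window-sized
 * chunks, re-selecting the page as the address advances.
 */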
82static void __sdla_read(struct net_device *dev, int addr, void *buf, short len)
83{
84 char *temp;
85 const void *base;
86 int offset, bytes;
87
88 temp = buf;
89 while(len)
90 {
91 offset = addr & SDLA_ADDR_MASK;
92 bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
93 base = (const void *) (dev->mem_start + offset);
94
95 SDLA_WINDOW(dev, addr);
96 memcpy(temp, base, bytes);
97
98 addr += bytes;
99 temp += bytes;
100 len -= bytes;
101 }
102}
103
104static void sdla_read(struct net_device *dev, int addr, void *buf, short len)
105{
106 unsigned long flags;
107 spin_lock_irqsave(&sdla_lock, flags);
108 __sdla_read(dev, addr, buf, len);
109 spin_unlock_irqrestore(&sdla_lock, flags);
110}
111
112static void __sdla_write(struct net_device *dev, int addr,
113 const void *buf, short len)
114{
115 const char *temp;
116 void *base;
117 int offset, bytes;
118
119 temp = buf;
120 while(len)
121 {
122 offset = addr & SDLA_ADDR_MASK;
123 bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
124 base = (void *) (dev->mem_start + offset);
125
126 SDLA_WINDOW(dev, addr);
127 memcpy(base, temp, bytes);
128
129 addr += bytes;
130 temp += bytes;
131 len -= bytes;
132 }
133}
134
135static void sdla_write(struct net_device *dev, int addr,
136 const void *buf, short len)
137{
138 unsigned long flags;
139
140 spin_lock_irqsave(&sdla_lock, flags);
141 __sdla_write(dev, addr, buf, len);
142 spin_unlock_irqrestore(&sdla_lock, flags);
143}
144
145
146static void sdla_clear(struct net_device *dev)
147{
148 unsigned long flags;
149 char *base;
150 int len, addr, bytes;
151
152 len = 65536;
153 addr = 0;
154 bytes = SDLA_WINDOW_SIZE;
155 base = (void *) dev->mem_start;
156
157 spin_lock_irqsave(&sdla_lock, flags);
158 while(len)
159 {
160 SDLA_WINDOW(dev, addr);
161 memset(base, 0, bytes);
162
163 addr += bytes;
164 len -= bytes;
165 }
166 spin_unlock_irqrestore(&sdla_lock, flags);
167
168}
169
170static char sdla_byte(struct net_device *dev, int addr)
171{
172 unsigned long flags;
173 char byte, *temp;
174
175 temp = (void *) (dev->mem_start + (addr & SDLA_ADDR_MASK));
176
177 spin_lock_irqsave(&sdla_lock, flags);
178 SDLA_WINDOW(dev, addr);
179 byte = *temp;
180 spin_unlock_irqrestore(&sdla_lock, flags);
181
182 return(byte);
183}
184
185void sdla_stop(struct net_device *dev)
186{
187 struct frad_local *flp;
188
189 flp = dev->priv;
190 switch(flp->type)
191 {
192 case SDLA_S502A:
193 outb(SDLA_S502A_HALT, dev->base_addr + SDLA_REG_CONTROL);
194 flp->state = SDLA_HALT;
195 break;
196 case SDLA_S502E:
197 outb(SDLA_HALT, dev->base_addr + SDLA_REG_Z80_CONTROL);
198 outb(SDLA_S502E_ENABLE, dev->base_addr + SDLA_REG_CONTROL);
199 flp->state = SDLA_S502E_ENABLE;
200 break;
201 case SDLA_S507:
202 flp->state &= ~SDLA_CPUEN;
203 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
204 break;
205 case SDLA_S508:
206 flp->state &= ~SDLA_CPUEN;
207 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
208 break;
209 }
210}
211
212void sdla_start(struct net_device *dev)
213{
214 struct frad_local *flp;
215
216 flp = dev->priv;
217 switch(flp->type)
218 {
219 case SDLA_S502A:
220 outb(SDLA_S502A_NMI, dev->base_addr + SDLA_REG_CONTROL);
221 outb(SDLA_S502A_START, dev->base_addr + SDLA_REG_CONTROL);
222 flp->state = SDLA_S502A_START;
223 break;
224 case SDLA_S502E:
225 outb(SDLA_S502E_CPUEN, dev->base_addr + SDLA_REG_Z80_CONTROL);
226 outb(0x00, dev->base_addr + SDLA_REG_CONTROL);
227 flp->state = 0;
228 break;
229 case SDLA_S507:
230 flp->state |= SDLA_CPUEN;
231 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
232 break;
233 case SDLA_S508:
234 flp->state |= SDLA_CPUEN;
235 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
236 break;
237 }
238}
239
240/****************************************************
241 *
242 * this is used for the S502A/E cards to determine
243 * the speed of the onboard CPU. Calibration is
244 * necessary for the Frame Relay code uploaded
245 * later. Incorrect results cause timing problems
246 * with link checks & status messages
247 *
248 ***************************************************/
249
250int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
251{
252 unsigned long start, done, now;
253 char resp, *temp;
254
255 start = now = jiffies;
256 done = jiffies + jiffs;
257
258 temp = (void *)dev->mem_start;
259 temp += z80_addr & SDLA_ADDR_MASK;
260
261 resp = ~resp1;
262 while (time_before(jiffies, done) && (resp != resp1) && (!resp2 || (resp != resp2)))
263 {
264 if (jiffies != now)
265 {
266 SDLA_WINDOW(dev, z80_addr);
267 now = jiffies;
268 resp = *temp;
269 }
270 }
271 return(time_before(jiffies, done) ? jiffies - start : -1);
272}
273
274/* constants for Z80 CPU speed */
275#define Z80_READY '1' /* Z80 is ready to begin */
276#define LOADER_READY '2' /* driver is ready to begin */
277#define Z80_SCC_OK '3' /* SCC is on board */
278#define Z80_SCC_BAD '4' /* SCC was not found */
279
280static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
281{
282 int jiffs;
283 char data;
284
285 sdla_start(dev);
286 if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0)
287 return(-EIO);
288
289 data = LOADER_READY;
290 sdla_write(dev, 0, &data, 1);
291
292 if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0)
293 return(-EIO);
294
295 sdla_stop(dev);
296 sdla_read(dev, 0, &data, 1);
297
298 if (data == Z80_SCC_BAD)
299 {
300 printk("%s: SCC bad\n", dev->name);
301 return(-EIO);
302 }
303
304 if (data != Z80_SCC_OK)
305 return(-EINVAL);
306
307 if (jiffs < 165)
308 ifr->ifr_mtu = SDLA_CPU_16M;
309 else if (jiffs < 220)
310 ifr->ifr_mtu = SDLA_CPU_10M;
311 else if (jiffs < 258)
312 ifr->ifr_mtu = SDLA_CPU_8M;
313 else if (jiffs < 357)
314 ifr->ifr_mtu = SDLA_CPU_7M;
315 else if (jiffs < 467)
316 ifr->ifr_mtu = SDLA_CPU_5M;
317 else
318 ifr->ifr_mtu = SDLA_CPU_3M;
319
320 return(0);
321}
322
323/************************************************
324 *
325 * Direct interaction with the Frame Relay code
326 * starts here.
327 *
328 ************************************************/
329
330struct _dlci_stat
331{
332 short dlci __attribute__((packed));
333 char flags __attribute__((packed));
334};
335
336struct _frad_stat
337{
338 char flags;
339 struct _dlci_stat dlcis[SDLA_MAX_DLCI];
340};
341
342static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int len, void *data)
343{
344 struct _dlci_stat *pstatus;
345 short *pdlci;
346 int i;
347 char *state, line[30];
348
349 switch (ret)
350 {
351 case SDLA_RET_MODEM:
352 state = data;
353 if (*state & SDLA_MODEM_DCD_LOW)
354 printk(KERN_INFO "%s: Modem DCD unexpectedly low!\n", dev->name);
355 if (*state & SDLA_MODEM_CTS_LOW)
356 printk(KERN_INFO "%s: Modem CTS unexpectedly low!\n", dev->name);
357 /* I should probably do something about this! */
358 break;
359
360 case SDLA_RET_CHANNEL_OFF:
361 printk(KERN_INFO "%s: Channel became inoperative!\n", dev->name);
362 /* same here */
363 break;
364
365 case SDLA_RET_CHANNEL_ON:
366 printk(KERN_INFO "%s: Channel became operative!\n", dev->name);
367 /* same here */
368 break;
369
370 case SDLA_RET_DLCI_STATUS:
371 printk(KERN_INFO "%s: Status change reported by Access Node.\n", dev->name);
372 len /= sizeof(struct _dlci_stat);
373 for(pstatus = data, i=0;i < len;i++,pstatus++)
374 {
375 if (pstatus->flags & SDLA_DLCI_NEW)
376 state = "new";
377 else if (pstatus->flags & SDLA_DLCI_DELETED)
378 state = "deleted";
379 else if (pstatus->flags & SDLA_DLCI_ACTIVE)
380 state = "active";
381 else
382 {
383 sprintf(line, "unknown status: %02X", pstatus->flags);
384 state = line;
385 }
386 printk(KERN_INFO "%s: DLCI %i: %s.\n", dev->name, pstatus->dlci, state);
387 /* same here */
388 }
389 break;
390
391 case SDLA_RET_DLCI_UNKNOWN:
392 printk(KERN_INFO "%s: Received unknown DLCIs:", dev->name);
393 len /= sizeof(short);
394 for(pdlci = data,i=0;i < len;i++,pdlci++)
395 printk(" %i", *pdlci);
396 printk("\n");
397 break;
398
399 case SDLA_RET_TIMEOUT:
400 printk(KERN_ERR "%s: Command timed out!\n", dev->name);
401 break;
402
403 case SDLA_RET_BUF_OVERSIZE:
404 printk(KERN_INFO "%s: Bc/CIR overflow, acceptable size is %i\n", dev->name, len);
405 break;
406
407 case SDLA_RET_BUF_TOO_BIG:
408 printk(KERN_INFO "%s: Buffer size over specified max of %i\n", dev->name, len);
409 break;
410
411 case SDLA_RET_CHANNEL_INACTIVE:
412 case SDLA_RET_DLCI_INACTIVE:
413 case SDLA_RET_CIR_OVERFLOW:
414 case SDLA_RET_NO_BUFS:
415 if (cmd == SDLA_INFORMATION_WRITE)
416 break;
417
418 default:
419 printk(KERN_DEBUG "%s: Cmd 0x%2.2X generated return code 0x%2.2X\n", dev->name, cmd, ret);
420 /* Further processing could be done here */
421 break;
422 }
423}
424
425static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
426 void *inbuf, short inlen, void *outbuf, short *outlen)
427{
428 static struct _frad_stat status;
429 struct frad_local *flp;
430 struct sdla_cmd *cmd_buf;
431 unsigned long pflags;
432 unsigned long jiffs;
433 int ret, waiting, len;
434 long window;
435
436 flp = dev->priv;
437 window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF;
438 cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK));
439 ret = 0;
440 len = 0;
441 jiffs = jiffies + HZ; /* 1 second is plenty */
442
443 spin_lock_irqsave(&sdla_lock, pflags);
444 SDLA_WINDOW(dev, window);
445 cmd_buf->cmd = cmd;
446 cmd_buf->dlci = dlci;
447 cmd_buf->flags = flags;
448
449 if (inbuf)
450 memcpy(cmd_buf->data, inbuf, inlen);
451
452 cmd_buf->length = inlen;
453
454 cmd_buf->opp_flag = 1;
455 spin_unlock_irqrestore(&sdla_lock, pflags);
456
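	/*
	 * Mailbox handshake: setting opp_flag above hands the command buffer
	 * to the on-card firmware, which clears the flag once retval/length
	 * are filled in.  Poll for that (up to the one-second deadline in
	 * jiffs), re-selecting the memory window on each check.
	 */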
457 waiting = 1;
458 len = 0;
459 while (waiting && time_before_eq(jiffies, jiffs))
460 {
461 if (waiting++ % 3)
462 {
463 spin_lock_irqsave(&sdla_lock, pflags);
464 SDLA_WINDOW(dev, window);
465 waiting = ((volatile int)(cmd_buf->opp_flag));
466 spin_unlock_irqrestore(&sdla_lock, pflags);
467 }
468 }
469
470 if (!waiting)
471 {
472
473 spin_lock_irqsave(&sdla_lock, pflags);
474 SDLA_WINDOW(dev, window);
475 ret = cmd_buf->retval;
476 len = cmd_buf->length;
477 if (outbuf && outlen)
478 {
479 *outlen = *outlen >= len ? len : *outlen;
480
481 if (*outlen)
482 memcpy(outbuf, cmd_buf->data, *outlen);
483 }
484
485 /* This is a local copy that's used for error handling */
486 if (ret)
487 memcpy(&status, cmd_buf->data, len > sizeof(status) ? sizeof(status) : len);
488
489 spin_unlock_irqrestore(&sdla_lock, pflags);
490 }
491 else
492 ret = SDLA_RET_TIMEOUT;
493
494 if (ret != SDLA_RET_OK)
495 sdla_errors(dev, cmd, dlci, ret, len, &status);
496
497 return(ret);
498}
499
500/***********************************************
501 *
502 * these functions are called by the DLCI driver
503 *
504 ***********************************************/
505
506static int sdla_reconfig(struct net_device *dev);
507
508int sdla_activate(struct net_device *slave, struct net_device *master)
509{
510 struct frad_local *flp;
511 int i;
512
513 flp = slave->priv;
514
515 for(i=0;i<CONFIG_DLCI_MAX;i++)
516 if (flp->master[i] == master)
517 break;
518
519 if (i == CONFIG_DLCI_MAX)
520 return(-ENODEV);
521
522 flp->dlci[i] = abs(flp->dlci[i]);
523
524 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
525 sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
526
527 return(0);
528}
529
530int sdla_deactivate(struct net_device *slave, struct net_device *master)
531{
532 struct frad_local *flp;
533 int i;
534
535 flp = slave->priv;
536
537 for(i=0;i<CONFIG_DLCI_MAX;i++)
538 if (flp->master[i] == master)
539 break;
540
541 if (i == CONFIG_DLCI_MAX)
542 return(-ENODEV);
543
544 flp->dlci[i] = -abs(flp->dlci[i]);
545
546 if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
547 sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
548
549 return(0);
550}
551
552int sdla_assoc(struct net_device *slave, struct net_device *master)
553{
554 struct frad_local *flp;
555 int i;
556
557 if (master->type != ARPHRD_DLCI)
558 return(-EINVAL);
559
560 flp = slave->priv;
561
562 for(i=0;i<CONFIG_DLCI_MAX;i++)
563 {
564 if (!flp->master[i])
565 break;
566 if (abs(flp->dlci[i]) == *(short *)(master->dev_addr))
567 return(-EADDRINUSE);
568 }
569
570 if (i == CONFIG_DLCI_MAX)
571 return(-EMLINK); /* #### Alan: Comments on this ?? */
572
573
574 flp->master[i] = master;
575 flp->dlci[i] = -*(short *)(master->dev_addr);
576 master->mtu = slave->mtu;
577
578 if (netif_running(slave)) {
579 if (flp->config.station == FRAD_STATION_CPE)
580 sdla_reconfig(slave);
581 else
582 sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
583 }
584
585 return(0);
586}
587
588int sdla_deassoc(struct net_device *slave, struct net_device *master)
589{
590 struct frad_local *flp;
591 int i;
592
593 flp = slave->priv;
594
595 for(i=0;i<CONFIG_DLCI_MAX;i++)
596 if (flp->master[i] == master)
597 break;
598
599 if (i == CONFIG_DLCI_MAX)
600 return(-ENODEV);
601
602 flp->master[i] = NULL;
603 flp->dlci[i] = 0;
604
605
606 if (netif_running(slave)) {
607 if (flp->config.station == FRAD_STATION_CPE)
608 sdla_reconfig(slave);
609 else
610 sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
611 }
612
613 return(0);
614}
615
616int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
617{
618 struct frad_local *flp;
619 struct dlci_local *dlp;
620 int i;
621 short len, ret;
622
623 flp = slave->priv;
624
625 for(i=0;i<CONFIG_DLCI_MAX;i++)
626 if (flp->master[i] == master)
627 break;
628
629 if (i == CONFIG_DLCI_MAX)
630 return(-ENODEV);
631
632 dlp = master->priv;
633
634 ret = SDLA_RET_OK;
635 len = sizeof(struct dlci_conf);
636 if (netif_running(slave)) {
637 if (get)
638 ret = sdla_cmd(slave, SDLA_READ_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
639 NULL, 0, &dlp->config, &len);
640 else
641 ret = sdla_cmd(slave, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
642 &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL);
643 }
644
645 return(ret == SDLA_RET_OK ? 0 : -EIO);
646}
647
648/**************************
649 *
650 * now for the Linux driver
651 *
652 **************************/
653
654/* NOTE: the DLCI driver deals with freeing the SKB!! */
655static int sdla_transmit(struct sk_buff *skb, struct net_device *dev)
656{
657 struct frad_local *flp;
658 int ret, addr, accept, i;
659 short size;
660 unsigned long flags;
661 struct buf_entry *pbuf;
662
663 flp = dev->priv;
664 ret = 0;
665 accept = 1;
666
667 netif_stop_queue(dev);
668
669 /*
670 * stupid GateD insists on setting up the multicast router thru us
671 * and we're ill equipped to handle a non Frame Relay packet at this
672 * time!
673 */
674
675 accept = 1;
676 switch (dev->type)
677 {
678 case ARPHRD_FRAD:
679 if (skb->dev->type != ARPHRD_DLCI)
680 {
681 printk(KERN_WARNING "%s: Non DLCI device, type %i, tried to send on FRAD module.\n", dev->name, skb->dev->type);
682 accept = 0;
683 }
684 break;
685 default:
686 printk(KERN_WARNING "%s: unknown firmware type 0x%4.4X\n", dev->name, dev->type);
687 accept = 0;
688 break;
689 }
690 if (accept)
691 {
692 /* this is frame specific, but till there's a PPP module, it's the default */
693 switch (flp->type)
694 {
695 case SDLA_S502A:
696 case SDLA_S502E:
697 ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL);
698 break;
699 case SDLA_S508:
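			/*
			 * S508: the WRITE command returns the address of an
			 * on-card buffer descriptor; the frame is then copied
			 * into card memory through the window and handed to
			 * the firmware by setting the descriptor's opp_flag.
			 */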
700 size = sizeof(addr);
701 ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size);
702 if (ret == SDLA_RET_OK)
703 {
704
705 spin_lock_irqsave(&sdla_lock, flags);
706 SDLA_WINDOW(dev, addr);
707 pbuf = (void *)(((int) dev->mem_start) + (addr & SDLA_ADDR_MASK));
708 __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
709 SDLA_WINDOW(dev, addr);
710 pbuf->opp_flag = 1;
711 spin_unlock_irqrestore(&sdla_lock, flags);
712 }
713 break;
714 }
715 switch (ret)
716 {
717 case SDLA_RET_OK:
718 flp->stats.tx_packets++;
719 ret = DLCI_RET_OK;
720 break;
721
722 case SDLA_RET_CIR_OVERFLOW:
723 case SDLA_RET_BUF_OVERSIZE:
724 case SDLA_RET_NO_BUFS:
725 flp->stats.tx_dropped++;
726 ret = DLCI_RET_DROP;
727 break;
728
729 default:
730 flp->stats.tx_errors++;
731 ret = DLCI_RET_ERR;
732 break;
733 }
734 }
735 netif_wake_queue(dev);
736 for(i=0;i<CONFIG_DLCI_MAX;i++)
737 {
738 if(flp->master[i]!=NULL)
739 netif_wake_queue(flp->master[i]);
740 }
741 return(ret);
742}
743
744static void sdla_receive(struct net_device *dev)
745{
746 struct net_device *master;
747 struct frad_local *flp;
748 struct dlci_local *dlp;
749 struct sk_buff *skb;
750
751 struct sdla_cmd *cmd;
752 struct buf_info *pbufi;
753 struct buf_entry *pbuf;
754
755 unsigned long flags;
756 int i=0, received, success, addr, buf_base, buf_top;
757 short dlci, len, len2, split;
758
759 flp = dev->priv;
760 success = 1;
761 received = addr = buf_top = buf_base = 0;
762 len = dlci = 0;
763 skb = NULL;
764 master = NULL;
765 cmd = NULL;
766 pbufi = NULL;
767 pbuf = NULL;
768
769 spin_lock_irqsave(&sdla_lock, flags);
770
771 switch (flp->type)
772 {
773 case SDLA_S502A:
774 case SDLA_S502E:
775 cmd = (void *) (dev->mem_start + (SDLA_502_RCV_BUF & SDLA_ADDR_MASK));
776 SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
777 success = cmd->opp_flag;
778 if (!success)
779 break;
780
781 dlci = cmd->dlci;
782 len = cmd->length;
783 break;
784
785 case SDLA_S508:
786 pbufi = (void *) (dev->mem_start + (SDLA_508_RXBUF_INFO & SDLA_ADDR_MASK));
787 SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
788 pbuf = (void *) (dev->mem_start + ((pbufi->rse_base + flp->buffer * sizeof(struct buf_entry)) & SDLA_ADDR_MASK));
789 success = pbuf->opp_flag;
790 if (!success)
791 break;
792
793 buf_top = pbufi->buf_top;
794 buf_base = pbufi->buf_base;
795 dlci = pbuf->dlci;
796 len = pbuf->length;
797 addr = pbuf->buf_addr;
798 break;
799 }
800
801 /* common code, find the DLCI and get the SKB */
802 if (success)
803 {
804 for (i=0;i<CONFIG_DLCI_MAX;i++)
805 if (flp->dlci[i] == dlci)
806 break;
807
808 if (i == CONFIG_DLCI_MAX)
809 {
 810 printk(KERN_NOTICE "%s: Received packet from invalid DLCI %i, ignoring.\n", dev->name, dlci);
811 flp->stats.rx_errors++;
812 success = 0;
813 }
814 }
815
816 if (success)
817 {
818 master = flp->master[i];
819 skb = dev_alloc_skb(len + sizeof(struct frhdr));
820 if (skb == NULL)
821 {
822 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
823 flp->stats.rx_dropped++;
824 success = 0;
825 }
826 else
827 skb_reserve(skb, sizeof(struct frhdr));
828 }
829
830 /* pick up the data */
831 switch (flp->type)
832 {
833 case SDLA_S502A:
834 case SDLA_S502E:
835 if (success)
836 __sdla_read(dev, SDLA_502_RCV_BUF + SDLA_502_DATA_OFS, skb_put(skb,len), len);
837
838 SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
839 cmd->opp_flag = 0;
840 break;
841
842 case SDLA_S508:
843 if (success)
844 {
845 /* is this buffer split off the end of the internal ring buffer */
846 split = addr + len > buf_top + 1 ? len - (buf_top - addr + 1) : 0;
847 len2 = len - split;
848
849 __sdla_read(dev, addr, skb_put(skb, len2), len2);
850 if (split)
851 __sdla_read(dev, buf_base, skb_put(skb, split), split);
852 }
853
854 /* increment the buffer we're looking at */
855 SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
856 flp->buffer = (flp->buffer + 1) % pbufi->rse_num;
857 pbuf->opp_flag = 0;
858 break;
859 }
860
861 if (success)
862 {
863 flp->stats.rx_packets++;
864 dlp = master->priv;
865 (*dlp->receive)(skb, master);
866 }
867
868 spin_unlock_irqrestore(&sdla_lock, flags);
869}
870
871static irqreturn_t sdla_isr(int irq, void *dev_id, struct pt_regs * regs)
872{
873 struct net_device *dev;
874 struct frad_local *flp;
875 char byte;
876
877 dev = dev_id;
878
879 if (dev == NULL)
880 {
881 printk(KERN_WARNING "sdla_isr(): irq %d for unknown device.\n", irq);
882 return IRQ_NONE;
883 }
884
885 flp = dev->priv;
886
887 if (!flp->initialized)
888 {
889 printk(KERN_WARNING "%s: irq %d for uninitialized device.\n", dev->name, irq);
890 return IRQ_NONE;
891 }
892
893 byte = sdla_byte(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE);
894 switch (byte)
895 {
896 case SDLA_INTR_RX:
897 sdla_receive(dev);
898 break;
899
900 /* the command will get an error return, which is processed above */
901 case SDLA_INTR_MODEM:
902 case SDLA_INTR_STATUS:
903 sdla_cmd(dev, SDLA_READ_DLC_STATUS, 0, 0, NULL, 0, NULL, NULL);
904 break;
905
906 case SDLA_INTR_TX:
907 case SDLA_INTR_COMPLETE:
908 case SDLA_INTR_TIMER:
909 printk(KERN_WARNING "%s: invalid irq flag 0x%02X.\n", dev->name, byte);
910 break;
911 }
912
913 /* the S502E requires a manual acknowledgement of the interrupt */
914 if (flp->type == SDLA_S502E)
915 {
916 flp->state &= ~SDLA_S502E_INTACK;
917 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
918 flp->state |= SDLA_S502E_INTACK;
919 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
920 }
921
922 /* this clears the byte, informing the Z80 we're done */
923 byte = 0;
924 sdla_write(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
925 return IRQ_HANDLED;
926}
927
928static void sdla_poll(unsigned long device)
929{
930 struct net_device *dev;
931 struct frad_local *flp;
932
933 dev = (struct net_device *) device;
934 flp = dev->priv;
935
936 if (sdla_byte(dev, SDLA_502_RCV_BUF))
937 sdla_receive(dev);
938
939 flp->timer.expires = 1;
940 add_timer(&flp->timer);
941}
942
943static int sdla_close(struct net_device *dev)
944{
945 struct frad_local *flp;
946 struct intr_info intr;
947 int len, i;
948 short dlcis[CONFIG_DLCI_MAX];
949
950 flp = dev->priv;
951
952 len = 0;
953 for(i=0;i<CONFIG_DLCI_MAX;i++)
954 if (flp->dlci[i])
955 dlcis[len++] = abs(flp->dlci[i]);
956 len *= 2;
957
958 if (flp->config.station == FRAD_STATION_NODE)
959 {
960 for(i=0;i<CONFIG_DLCI_MAX;i++)
961 if (flp->dlci[i] > 0)
962 sdla_cmd(dev, SDLA_DEACTIVATE_DLCI, 0, 0, dlcis, len, NULL, NULL);
963 sdla_cmd(dev, SDLA_DELETE_DLCI, 0, 0, &flp->dlci[i], sizeof(flp->dlci[i]), NULL, NULL);
964 }
965
966 memset(&intr, 0, sizeof(intr));
 967 /* shut down reception and card interrupts */
968 switch(flp->type)
969 {
970 case SDLA_S502A:
971 del_timer(&flp->timer);
972 break;
973
974 case SDLA_S502E:
975 sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
976 flp->state &= ~SDLA_S502E_INTACK;
977 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
978 break;
979
980 case SDLA_S507:
981 break;
982
983 case SDLA_S508:
984 sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
985 flp->state &= ~SDLA_S508_INTEN;
986 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
987 break;
988 }
989
990 sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
991
992 netif_stop_queue(dev);
993
994 return(0);
995}
996
997struct conf_data {
998 struct frad_conf config;
999 short dlci[CONFIG_DLCI_MAX];
1000};
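/*
 * Payload format used by sdla_open() and sdla_reconfig() below: a frad_conf
 * header followed by the configured DLCIs, two bytes per entry; the length
 * passed with SDLA_SET_DLCI_CONFIGURATION is built to match
 * (sizeof(struct frad_conf) + 2 * number of DLCIs).
 */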
1001
1002static int sdla_open(struct net_device *dev)
1003{
1004 struct frad_local *flp;
1005 struct dlci_local *dlp;
1006 struct conf_data data;
1007 struct intr_info intr;
1008 int len, i;
1009 char byte;
1010
1011 flp = dev->priv;
1012
1013 if (!flp->initialized)
1014 return(-EPERM);
1015
1016 if (!flp->configured)
1017 return(-EPERM);
1018
1019 /* time to send in the configuration */
1020 len = 0;
1021 for(i=0;i<CONFIG_DLCI_MAX;i++)
1022 if (flp->dlci[i])
1023 data.dlci[len++] = abs(flp->dlci[i]);
1024 len *= 2;
1025
1026 memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
1027 len += sizeof(struct frad_conf);
1028
1029 sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
1030 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
1031
1032 if (flp->type == SDLA_S508)
1033 flp->buffer = 0;
1034
1035 sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
1036
1037 /* let's start up the reception */
1038 memset(&intr, 0, sizeof(intr));
1039 switch(flp->type)
1040 {
1041 case SDLA_S502A:
1042 flp->timer.expires = 1;
1043 add_timer(&flp->timer);
1044 break;
1045
1046 case SDLA_S502E:
1047 flp->state |= SDLA_S502E_ENABLE;
1048 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
1049 flp->state |= SDLA_S502E_INTACK;
1050 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
1051 byte = 0;
1052 sdla_write(dev, SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
1053 intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
1054 sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
1055 break;
1056
1057 case SDLA_S507:
1058 break;
1059
1060 case SDLA_S508:
1061 flp->state |= SDLA_S508_INTEN;
1062 outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
1063 byte = 0;
1064 sdla_write(dev, SDLA_508_IRQ_INTERFACE, &byte, sizeof(byte));
1065 intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
1066 intr.irq = dev->irq;
1067 sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
1068 break;
1069 }
1070
1071 if (flp->config.station == FRAD_STATION_CPE)
1072 {
1073 byte = SDLA_ICS_STATUS_ENQ;
1074 sdla_cmd(dev, SDLA_ISSUE_IN_CHANNEL_SIGNAL, 0, 0, &byte, sizeof(byte), NULL, NULL);
1075 }
1076 else
1077 {
1078 sdla_cmd(dev, SDLA_ADD_DLCI, 0, 0, data.dlci, len - sizeof(struct frad_conf), NULL, NULL);
1079 for(i=0;i<CONFIG_DLCI_MAX;i++)
1080 if (flp->dlci[i] > 0)
1081 sdla_cmd(dev, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], 2*sizeof(flp->dlci[i]), NULL, NULL);
1082 }
1083
1084 /* configure any specific DLCI settings */
1085 for(i=0;i<CONFIG_DLCI_MAX;i++)
1086 if (flp->dlci[i])
1087 {
1088 dlp = flp->master[i]->priv;
1089 if (dlp->configured)
1090 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL);
1091 }
1092
1093 netif_start_queue(dev);
1094
1095 return(0);
1096}
1097
1098static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get)
1099{
1100 struct frad_local *flp;
1101 struct conf_data data;
1102 int i;
1103 short size;
1104
1105 if (dev->type == 0xFFFF)
1106 return(-EUNATCH);
1107
1108 flp = dev->priv;
1109
1110 if (!get)
1111 {
1112 if (netif_running(dev))
1113 return(-EBUSY);
1114
1115 if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
1116 return -EFAULT;
1117
1118 if (data.config.station & ~FRAD_STATION_NODE)
1119 return(-EINVAL);
1120
1121 if (data.config.flags & ~FRAD_VALID_FLAGS)
1122 return(-EINVAL);
1123
1124 if ((data.config.kbaud < 0) ||
1125 ((data.config.kbaud > 128) && (flp->type != SDLA_S508)))
1126 return(-EINVAL);
1127
1128 if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232))
1129 return(-EINVAL);
1130
1131 if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU))
1132 return(-EINVAL);
1133
1134 if ((data.config.T391 < 5) || (data.config.T391 > 30))
1135 return(-EINVAL);
1136
1137 if ((data.config.T392 < 5) || (data.config.T392 > 30))
1138 return(-EINVAL);
1139
1140 if ((data.config.N391 < 1) || (data.config.N391 > 255))
1141 return(-EINVAL);
1142
1143 if ((data.config.N392 < 1) || (data.config.N392 > 10))
1144 return(-EINVAL);
1145
1146 if ((data.config.N393 < 1) || (data.config.N393 > 10))
1147 return(-EINVAL);
1148
1149 memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
1150 flp->config.flags |= SDLA_DIRECT_RECV;
1151
1152 if (flp->type == SDLA_S508)
1153 flp->config.flags |= SDLA_TX70_RX30;
1154
1155 if (dev->mtu != flp->config.mtu)
1156 {
1157 /* this is required to change the MTU */
1158 dev->mtu = flp->config.mtu;
1159 for(i=0;i<CONFIG_DLCI_MAX;i++)
1160 if (flp->master[i])
1161 flp->master[i]->mtu = flp->config.mtu;
1162 }
1163
1164 flp->config.mtu += sizeof(struct frhdr);
1165
1166 /* off to the races! */
1167 if (!flp->configured)
1168 sdla_start(dev);
1169
1170 flp->configured = 1;
1171 }
1172 else
1173 {
1174 /* no sense reading if the CPU isn't started */
1175 if (netif_running(dev))
1176 {
1177 size = sizeof(data);
1178 if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
1179 return(-EIO);
1180 }
1181 else
1182 if (flp->configured)
1183 memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
1184 else
1185 memset(&data.config, 0, sizeof(struct frad_conf));
1186
1187 memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
1188 data.config.flags &= FRAD_VALID_FLAGS;
1189 data.config.mtu -= data.config.mtu > sizeof(struct frhdr) ? sizeof(struct frhdr) : data.config.mtu;
1190 return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0;
1191 }
1192
1193 return(0);
1194}
1195
1196static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read)
1197{
1198 struct sdla_mem mem;
1199 char *temp;
1200
1201 if(copy_from_user(&mem, info, sizeof(mem)))
1202 return -EFAULT;
1203
1204 if (read)
1205 {
1206 temp = kmalloc(mem.len, GFP_KERNEL);
1207 if (!temp)
1208 return(-ENOMEM);
1209 memset(temp, 0, mem.len);
1210 sdla_read(dev, mem.addr, temp, mem.len);
1211 if(copy_to_user(mem.data, temp, mem.len))
1212 {
1213 kfree(temp);
1214 return -EFAULT;
1215 }
1216 kfree(temp);
1217 }
1218 else
1219 {
1220 temp = kmalloc(mem.len, GFP_KERNEL);
1221 if (!temp)
1222 return(-ENOMEM);
1223 if(copy_from_user(temp, mem.data, mem.len))
1224 {
1225 kfree(temp);
1226 return -EFAULT;
1227 }
1228 sdla_write(dev, mem.addr, temp, mem.len);
1229 kfree(temp);
1230 }
1231 return(0);
1232}
1233
1234static int sdla_reconfig(struct net_device *dev)
1235{
1236 struct frad_local *flp;
1237 struct conf_data data;
1238 int i, len;
1239
1240 flp = dev->priv;
1241
1242 len = 0;
1243 for(i=0;i<CONFIG_DLCI_MAX;i++)
1244 if (flp->dlci[i])
1245 data.dlci[len++] = flp->dlci[i];
1246 len *= 2;
1247
1248 memcpy(&data, &flp->config, sizeof(struct frad_conf));
1249 len += sizeof(struct frad_conf);
1250
1251 sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
1252 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
1253 sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
1254
1255 return(0);
1256}
1257
1258static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1259{
1260 struct frad_local *flp;
1261
1262 if(!capable(CAP_NET_ADMIN))
1263 return -EPERM;
1264
1265 flp = dev->priv;
1266
1267 if (!flp->initialized)
1268 return(-EINVAL);
1269
1270 switch (cmd)
1271 {
1272 case FRAD_GET_CONF:
1273 case FRAD_SET_CONF:
1274 return(sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF));
1275
1276 case SDLA_IDENTIFY:
1277 ifr->ifr_flags = flp->type;
1278 break;
1279
1280 case SDLA_CPUSPEED:
1281 return(sdla_cpuspeed(dev, ifr));
1282
1283/* ==========================================================
1284NOTE: This is rather a useless action right now, as the
1285 current driver does not support protocols other than
1286 FR. However, Sangoma has modules for a number of
1287 other protocols in the works.
1288============================================================*/
1289 case SDLA_PROTOCOL:
1290 if (flp->configured)
1291 return(-EALREADY);
1292
1293 switch (ifr->ifr_flags)
1294 {
1295 case ARPHRD_FRAD:
1296 dev->type = ifr->ifr_flags;
1297 break;
1298 default:
1299 return(-ENOPROTOOPT);
1300 }
1301 break;
1302
1303 case SDLA_CLEARMEM:
1304 sdla_clear(dev);
1305 break;
1306
1307 case SDLA_WRITEMEM:
1308 case SDLA_READMEM:
1309 if(!capable(CAP_SYS_RAWIO))
1310 return -EPERM;
1311 return(sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM));
1312
1313 case SDLA_START:
1314 sdla_start(dev);
1315 break;
1316
1317 case SDLA_STOP:
1318 sdla_stop(dev);
1319 break;
1320
1321 default:
1322 return(-EOPNOTSUPP);
1323 }
1324 return(0);
1325}
1326
1327int sdla_change_mtu(struct net_device *dev, int new_mtu)
1328{
1329 struct frad_local *flp;
1330
1331 flp = dev->priv;
1332
1333 if (netif_running(dev))
1334 return(-EBUSY);
1335
1336 /* for now, you can't change the MTU! */
1337 return(-EOPNOTSUPP);
1338}
1339
1340int sdla_set_config(struct net_device *dev, struct ifmap *map)
1341{
1342 struct frad_local *flp;
1343 int i;
1344 char byte;
1345 unsigned base;
1346 int err = -EINVAL;
1347
1348 flp = dev->priv;
1349
1350 if (flp->initialized)
1351 return(-EINVAL);
1352
1353 for(i=0;i < sizeof(valid_port) / sizeof (int) ; i++)
1354 if (valid_port[i] == map->base_addr)
1355 break;
1356
1357 if (i == sizeof(valid_port) / sizeof(int))
1358 return(-EINVAL);
1359
1360 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
1361 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr);
1362 return(-EINVAL);
1363 }
1364 base = map->base_addr;
1365
1366 /* test for card types, S502A, S502E, S507, S508 */
1367 /* these tests shut down the card completely, so clear the state */
1368 flp->type = SDLA_UNKNOWN;
1369 flp->state = 0;
1370
1371 for(i=1;i<SDLA_IO_EXTENTS;i++)
1372 if (inb(base + i) != 0xFF)
1373 break;
1374
1375 if (i == SDLA_IO_EXTENTS) {
1376 outb(SDLA_HALT, base + SDLA_REG_Z80_CONTROL);
1377 if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x08) {
1378 outb(SDLA_S502E_INTACK, base + SDLA_REG_CONTROL);
1379 if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x0C) {
1380 outb(SDLA_HALT, base + SDLA_REG_CONTROL);
1381 flp->type = SDLA_S502E;
1382 goto got_type;
1383 }
1384 }
1385 }
1386
1387 for(byte=inb(base),i=0;i<SDLA_IO_EXTENTS;i++)
1388 if (inb(base + i) != byte)
1389 break;
1390
1391 if (i == SDLA_IO_EXTENTS) {
1392 outb(SDLA_HALT, base + SDLA_REG_CONTROL);
1393 if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x30) {
1394 outb(SDLA_S507_ENABLE, base + SDLA_REG_CONTROL);
1395 if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x32) {
1396 outb(SDLA_HALT, base + SDLA_REG_CONTROL);
1397 flp->type = SDLA_S507;
1398 goto got_type;
1399 }
1400 }
1401 }
1402
1403 outb(SDLA_HALT, base + SDLA_REG_CONTROL);
1404 if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x00) {
1405 outb(SDLA_S508_INTEN, base + SDLA_REG_CONTROL);
1406 if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x10) {
1407 outb(SDLA_HALT, base + SDLA_REG_CONTROL);
1408 flp->type = SDLA_S508;
1409 goto got_type;
1410 }
1411 }
1412
1413 outb(SDLA_S502A_HALT, base + SDLA_REG_CONTROL);
1414 if (inb(base + SDLA_S502_STS) == 0x40) {
1415 outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
1416 if (inb(base + SDLA_S502_STS) == 0x40) {
1417 outb(SDLA_S502A_INTEN, base + SDLA_REG_CONTROL);
1418 if (inb(base + SDLA_S502_STS) == 0x44) {
1419 outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
1420 flp->type = SDLA_S502A;
1421 goto got_type;
1422 }
1423 }
1424 }
1425
1426 printk(KERN_NOTICE "%s: Unknown card type\n", dev->name);
1427 err = -ENODEV;
1428 goto fail;
1429
1430got_type:
1431 switch(base) {
1432 case 0x270:
1433 case 0x280:
1434 case 0x380:
1435 case 0x390:
1436 if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
1437 goto fail;
1438 }
1439
1440 switch (map->irq) {
1441 case 2:
1442 if (flp->type != SDLA_S502E)
1443 goto fail;
1444 break;
1445
1446 case 10:
1447 case 11:
1448 case 12:
1449 case 15:
1450 case 4:
1451 if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
1452 goto fail;
1453 break;
1454 case 3:
1455 case 5:
1456 case 7:
1457 if (flp->type == SDLA_S502A)
1458 goto fail;
1459 break;
1460
1461 default:
1462 goto fail;
1463 }
1464
1465 err = -EAGAIN;
1466 if (request_irq(dev->irq, &sdla_isr, 0, dev->name, dev))
1467 goto fail;
1468
1469 if (flp->type == SDLA_S507) {
1470 switch(dev->irq) {
1471 case 3:
1472 flp->state = SDLA_S507_IRQ3;
1473 break;
1474 case 4:
1475 flp->state = SDLA_S507_IRQ4;
1476 break;
1477 case 5:
1478 flp->state = SDLA_S507_IRQ5;
1479 break;
1480 case 7:
1481 flp->state = SDLA_S507_IRQ7;
1482 break;
1483 case 10:
1484 flp->state = SDLA_S507_IRQ10;
1485 break;
1486 case 11:
1487 flp->state = SDLA_S507_IRQ11;
1488 break;
1489 case 12:
1490 flp->state = SDLA_S507_IRQ12;
1491 break;
1492 case 15:
1493 flp->state = SDLA_S507_IRQ15;
1494 break;
1495 }
1496 }
1497
1498 for(i=0;i < sizeof(valid_mem) / sizeof (int) ; i++)
1499 if (valid_mem[i] == map->mem_start)
1500 break;
1501
1502 err = -EINVAL;
1503 if (i == sizeof(valid_mem) / sizeof(int))
1504 goto fail2;
1505
1506 if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E)
1507 goto fail2;
1508
1509 if (flp->type != SDLA_S507 && map->mem_start >> 16 == 0x0B)
1510 goto fail2;
1511
1512 if (flp->type == SDLA_S507 && map->mem_start >> 16 == 0x0D)
1513 goto fail2;
1514
1515 byte = flp->type != SDLA_S508 ? SDLA_8K_WINDOW : 0;
1516 byte |= (map->mem_start & 0xF000) >> (12 + (flp->type == SDLA_S508 ? 1 : 0));
1517 switch(flp->type) {
1518 case SDLA_S502A:
1519 case SDLA_S502E:
1520 switch (map->mem_start >> 16) {
1521 case 0x0A:
1522 byte |= SDLA_S502_SEG_A;
1523 break;
1524 case 0x0C:
1525 byte |= SDLA_S502_SEG_C;
1526 break;
1527 case 0x0D:
1528 byte |= SDLA_S502_SEG_D;
1529 break;
1530 case 0x0E:
1531 byte |= SDLA_S502_SEG_E;
1532 break;
1533 }
1534 break;
1535 case SDLA_S507:
1536 switch (map->mem_start >> 16) {
1537 case 0x0A:
1538 byte |= SDLA_S507_SEG_A;
1539 break;
1540 case 0x0B:
1541 byte |= SDLA_S507_SEG_B;
1542 break;
1543 case 0x0C:
1544 byte |= SDLA_S507_SEG_C;
1545 break;
1546 case 0x0E:
1547 byte |= SDLA_S507_SEG_E;
1548 break;
1549 }
1550 break;
1551 case SDLA_S508:
1552 switch (map->mem_start >> 16) {
1553 case 0x0A:
1554 byte |= SDLA_S508_SEG_A;
1555 break;
1556 case 0x0C:
1557 byte |= SDLA_S508_SEG_C;
1558 break;
1559 case 0x0D:
1560 byte |= SDLA_S508_SEG_D;
1561 break;
1562 case 0x0E:
1563 byte |= SDLA_S508_SEG_E;
1564 break;
1565 }
1566 break;
1567 }
1568
1569 /* set the memory bits, and enable access */
1570 outb(byte, base + SDLA_REG_PC_WINDOW);
1571
1572 switch(flp->type)
1573 {
1574 case SDLA_S502E:
1575 flp->state = SDLA_S502E_ENABLE;
1576 break;
1577 case SDLA_S507:
1578 flp->state |= SDLA_MEMEN;
1579 break;
1580 case SDLA_S508:
1581 flp->state = SDLA_MEMEN;
1582 break;
1583 }
1584 outb(flp->state, base + SDLA_REG_CONTROL);
1585
1586 dev->irq = map->irq;
1587 dev->base_addr = base;
1588 dev->mem_start = map->mem_start;
1589 dev->mem_end = dev->mem_start + 0x2000;
1590 flp->initialized = 1;
1591 return 0;
1592
1593fail2:
1594 free_irq(map->irq, dev);
1595fail:
1596 release_region(base, SDLA_IO_EXTENTS);
1597 return err;
1598}
1599
1600static struct net_device_stats *sdla_stats(struct net_device *dev)
1601{
1602 struct frad_local *flp;
1603 flp = dev->priv;
1604
1605 return(&flp->stats);
1606}
1607
1608static void setup_sdla(struct net_device *dev)
1609{
1610 struct frad_local *flp = dev->priv;
1611
1612 netdev_boot_setup_check(dev);
1613
1614 SET_MODULE_OWNER(dev);
1615 dev->flags = 0;
1616 dev->type = 0xFFFF;
1617 dev->hard_header_len = 0;
1618 dev->addr_len = 0;
1619 dev->mtu = SDLA_MAX_MTU;
1620
1621 dev->open = sdla_open;
1622 dev->stop = sdla_close;
1623 dev->do_ioctl = sdla_ioctl;
1624 dev->set_config = sdla_set_config;
1625 dev->get_stats = sdla_stats;
1626 dev->hard_start_xmit = sdla_transmit;
1627 dev->change_mtu = sdla_change_mtu;
1628
1629 flp->activate = sdla_activate;
1630 flp->deactivate = sdla_deactivate;
1631 flp->assoc = sdla_assoc;
1632 flp->deassoc = sdla_deassoc;
1633 flp->dlci_conf = sdla_dlci_conf;
1634
1635 init_timer(&flp->timer);
1636 flp->timer.expires = 1;
1637 flp->timer.data = (unsigned long) dev;
1638 flp->timer.function = sdla_poll;
1639}
1640
1641static struct net_device *sdla;
1642
1643static int __init init_sdla(void)
1644{
1645 int err;
1646
1647 printk("%s.\n", version);
1648
1649 sdla = alloc_netdev(sizeof(struct frad_local), "sdla0", setup_sdla);
1650 if (!sdla)
1651 return -ENOMEM;
1652
1653 err = register_netdev(sdla);
1654 if (err)
1655 free_netdev(sdla);
1656
1657 return err;
1658}
1659
1660static void __exit exit_sdla(void)
1661{
1662 struct frad_local *flp = sdla->priv;
1663
1664 unregister_netdev(sdla);
1665 if (flp->initialized) {
1666 free_irq(sdla->irq, sdla);
1667 release_region(sdla->base_addr, SDLA_IO_EXTENTS);
1668 }
1669 del_timer_sync(&flp->timer);
1670 free_netdev(sdla);
1671}
1672
1673MODULE_LICENSE("GPL");
1674
1675module_init(init_sdla);
1676module_exit(exit_sdla);
diff --git a/drivers/net/wan/sdla_chdlc.c b/drivers/net/wan/sdla_chdlc.c
new file mode 100644
index 000000000000..afbe0024e3e1
--- /dev/null
+++ b/drivers/net/wan/sdla_chdlc.c
@@ -0,0 +1,4433 @@
1/*****************************************************************************
2* sdla_chdlc.c WANPIPE(tm) Multiprotocol WAN Link Driver. Cisco HDLC module.
3*
4* Authors: Nenad Corbic <ncorbic@sangoma.com>
5* Gideon Hack
6*
7* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
8*
9* This program is free software; you can redistribute it and/or
10* modify it under the terms of the GNU General Public License
11* as published by the Free Software Foundation; either version
12* 2 of the License, or (at your option) any later version.
13* ============================================================================
14* Feb 28, 2001 Nenad Corbic Updated if_tx_timeout() routine for
15* 2.4.X kernels.
16* Jan 25, 2001 Nenad Corbic Added a TTY Sync serial driver over the
17* HDLC streaming protocol
18* Added a TTY Async serial driver over the
19* Async protocol.
20* Dec 15, 2000 Nenad Corbic Updated for 2.4.X Kernel support
21* Nov 13, 2000 Nenad Corbic Added true interface type encoding option.
 22* Tcpdump doesn't support CHDLC interface
 23* types; to fix this, the "true type" option will set
 24* the interface type to RAW IP mode.
25* Nov 07, 2000 Nenad Corbic Added security features for UDP debugging:
26* Deny all and specify allowed requests.
27* Jun 20, 2000 Nenad Corbic Fixed the API IP ERROR bug. Caused by the
28* latest update.
29* May 09, 2000 Nenad Corbic Option to bring down an interface
30* upon disconnect.
31* Mar 23, 2000 Nenad Corbic Improved task queue, bh handling.
32* Mar 16, 2000 Nenad Corbic Fixed the SLARP Dynamic IP addressing.
33* Mar 06, 2000 Nenad Corbic Bug Fix: corrupted mbox recovery.
34* Feb 10, 2000 Gideon Hack Added ASYNC support.
35* Feb 09, 2000 Nenad Corbic Fixed two shutdown bugs in update() and
36* if_stats() functions.
 37* Jan 24, 2000 Nenad Corbic Fixed a startup wanpipe state race
38* condition between if_open and isr.
39* Jan 10, 2000 Nenad Corbic Added new socket API support.
 40* Dec 15, 1999 Nenad Corbic Fixed up header files for 2.0.X kernels
41* Nov 20, 1999 Nenad Corbic Fixed zero length API bug.
42* Sep 30, 1999 Nenad Corbic Fixed dynamic IP and route setup.
43* Sep 23, 1999 Nenad Corbic Added SMP support, fixed tracing
44* Sep 13, 1999 Nenad Corbic Split up Port 0 and 1 into separate devices.
45* Jun 02, 1999 Gideon Hack Added support for the S514 adapter.
46* Oct 30, 1998 Jaspreet Singh Added Support for CHDLC API (HDLC STREAMING).
47* Oct 28, 1998 Jaspreet Singh Added Support for Dual Port CHDLC.
48* Aug 07, 1998 David Fong Initial version.
49*****************************************************************************/
50
51#include <linux/module.h>
52#include <linux/kernel.h> /* printk(), and other useful stuff */
53#include <linux/stddef.h> /* offsetof(), etc. */
54#include <linux/errno.h> /* return codes */
55#include <linux/string.h> /* inline memset(), etc. */
56#include <linux/slab.h> /* kmalloc(), kfree() */
57#include <linux/wanrouter.h> /* WAN router definitions */
58#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
59#include <linux/if_arp.h> /* ARPHRD_* defines */
60
61
62#include <asm/uaccess.h>
63#include <linux/inetdevice.h>
64#include <linux/netdevice.h>
65
66#include <linux/in.h> /* sockaddr_in */
67#include <linux/inet.h>
68#include <linux/if.h>
69#include <asm/byteorder.h> /* htons(), etc. */
70#include <linux/sdlapci.h>
71#include <asm/io.h>
72
73#include <linux/sdla_chdlc.h> /* CHDLC firmware API definitions */
74#include <linux/sdla_asy.h> /* CHDLC (async) API definitions */
75
76#include <linux/if_wanpipe_common.h> /* Socket Driver common area */
77#include <linux/if_wanpipe.h>
78
79/* TTY Includes */
80#include <linux/tty.h>
81#include <linux/tty_flip.h>
82#include <linux/serial.h>
83
84
85/****** Defines & Macros ****************************************************/
86
87/* reasons for enabling the timer interrupt on the adapter */
88#define TMR_INT_ENABLED_UDP 0x01
89#define TMR_INT_ENABLED_UPDATE 0x02
90#define TMR_INT_ENABLED_CONFIG 0x10
91
92#define MAX_IP_ERRORS 10
93
94#define TTY_CHDLC_MAX_MTU 2000
95#define CHDLC_DFLT_DATA_LEN 1500 /* default MTU */
96#define CHDLC_HDR_LEN 1
97
98#define CHDLC_API 0x01
99
100#define PORT(x) (x == 0 ? "PRIMARY" : "SECONDARY" )
101#define MAX_BH_BUFF 10
102
103//#define PRINT_DEBUG
104#ifdef PRINT_DEBUG
105#define dbg_printk(format, a...) printk(format, ## a)
106#else
107#define dbg_printk(format, a...)
108#endif
109
110/******Data Structures*****************************************************/
111
112/* This structure is placed in the private data area of the device structure.
113 * The card structure used to occupy the private area but now the following
 114 * structure will incorporate the card structure along with CHDLC-specific data.
115 */
116
117typedef struct chdlc_private_area
118{
119 wanpipe_common_t common;
120 sdla_t *card;
121 int TracingEnabled; /* For enabling Tracing */
122 unsigned long curr_trace_addr; /* Used for Tracing */
123 unsigned long start_trace_addr;
124 unsigned long end_trace_addr;
125 unsigned long base_addr_trace_buffer;
126 unsigned long end_addr_trace_buffer;
127 unsigned short number_trace_elements;
128 unsigned available_buffer_space;
129 unsigned long router_start_time;
130 unsigned char route_status;
131 unsigned char route_removed;
132 unsigned long tick_counter; /* For 5s timeout counter */
133 unsigned long router_up_time;
134 u32 IP_address; /* IP addressing */
135 u32 IP_netmask;
136 u32 ip_local;
137 u32 ip_remote;
138 u32 ip_local_tmp;
139 u32 ip_remote_tmp;
140 u8 ip_error;
141 u8 config_chdlc;
142 u8 config_chdlc_timeout;
 143 unsigned char mc; /* Multicast support on/off */
144 unsigned short udp_pkt_lgth; /* udp packet processing */
145 char udp_pkt_src;
146 char udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
147 unsigned short timer_int_enabled;
148 char update_comms_stats; /* updating comms stats */
149
150 bh_data_t *bh_head; /* Circular buffer for chdlc_bh */
151 unsigned long tq_working;
152 volatile int bh_write;
153 volatile int bh_read;
154 atomic_t bh_buff_used;
155
156 unsigned char interface_down;
157
158 /* Polling work queue entry. Each interface
159 * has its own work queue entry, which is used
160 * to defer events from the interrupt */
161 struct work_struct poll_work;
162 struct timer_list poll_delay_timer;
163
164 u8 gateway;
165 u8 true_if_encoding;
166 //FIXME: add driver stats as per frame relay!
167
168} chdlc_private_area_t;
169
170/* Route Status options */
171#define NO_ROUTE 0x00
172#define ADD_ROUTE 0x01
173#define ROUTE_ADDED 0x02
174#define REMOVE_ROUTE 0x03
175
176
177/* variable for keeping track of enabling/disabling FT1 monitor status */
178static int rCount = 0;
179
180/* variable for tracking how many interfaces to open for WANPIPE on the
181 two ports */
182
183extern void disable_irq(unsigned int);
184extern void enable_irq(unsigned int);
185
186/****** Function Prototypes *************************************************/
187/* WAN link driver entry points. These are called by the WAN router module. */
188static int update(struct wan_device* wandev);
189static int new_if(struct wan_device* wandev, struct net_device* dev,
190 wanif_conf_t* conf);
191
192/* Network device interface */
193static int if_init(struct net_device* dev);
194static int if_open(struct net_device* dev);
195static int if_close(struct net_device* dev);
196static int if_header(struct sk_buff* skb, struct net_device* dev,
197 unsigned short type, void* daddr, void* saddr,
198 unsigned len);
199
200static int if_rebuild_hdr (struct sk_buff *skb);
201static struct net_device_stats* if_stats(struct net_device* dev);
202
203static int if_send(struct sk_buff* skb, struct net_device* dev);
204
205/* CHDLC Firmware interface functions */
206static int chdlc_configure (sdla_t* card, void* data);
207static int chdlc_comm_enable (sdla_t* card);
208static int chdlc_read_version (sdla_t* card, char* str);
209static int chdlc_set_intr_mode (sdla_t* card, unsigned mode);
210static int chdlc_send (sdla_t* card, void* data, unsigned len);
211static int chdlc_read_comm_err_stats (sdla_t* card);
212static int chdlc_read_op_stats (sdla_t* card);
213static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb);
214
215
216static int chdlc_disable_comm_shutdown (sdla_t *card);
217static void if_tx_timeout(struct net_device *dev);
218
219/* Miscellaneous CHDLC Functions */
220static int set_chdlc_config (sdla_t* card);
221static void init_chdlc_tx_rx_buff( sdla_t* card);
222static int process_chdlc_exception(sdla_t *card);
223static int process_global_exception(sdla_t *card);
224static int update_comms_stats(sdla_t* card,
225 chdlc_private_area_t* chdlc_priv_area);
226static int configure_ip (sdla_t* card);
227static int unconfigure_ip (sdla_t* card);
228static void process_route(sdla_t *card);
229static void port_set_state (sdla_t *card, int);
230static int config_chdlc (sdla_t *card);
231static void disable_comm (sdla_t *card);
232
233static void trigger_chdlc_poll(struct net_device *dev);
234static void chdlc_poll(struct net_device *dev);
235static void chdlc_poll_delay (unsigned long dev_ptr);
236
237
238/* Miscellaneous asynchronous interface Functions */
239static int set_asy_config (sdla_t* card);
240static int asy_comm_enable (sdla_t* card);
241
242/* Interrupt handlers */
243static void wpc_isr (sdla_t* card);
244static void rx_intr (sdla_t* card);
245static void timer_intr(sdla_t *);
246
247/* Bottom half handlers */
248static void chdlc_work(struct net_device *dev);
249static int chdlc_work_cleanup(struct net_device *dev);
250static int bh_enqueue(struct net_device *dev, struct sk_buff *skb);
251
252/* Miscellaneous functions */
253static int chk_bcast_mcast_addr(sdla_t* card, struct net_device* dev,
254 struct sk_buff *skb);
255static int reply_udp( unsigned char *data, unsigned int mbox_len );
256static int intr_test( sdla_t* card);
257static int udp_pkt_type( struct sk_buff *skb , sdla_t* card);
258static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
259 struct sk_buff *skb, struct net_device* dev,
260 chdlc_private_area_t* chdlc_priv_area);
261static int process_udp_mgmt_pkt(sdla_t* card, struct net_device* dev,
262 chdlc_private_area_t* chdlc_priv_area);
263static unsigned short calc_checksum (char *, int);
264static void s508_lock (sdla_t *card, unsigned long *smp_flags);
265static void s508_unlock (sdla_t *card, unsigned long *smp_flags);
266
267
268static int Intr_test_counter;
269
270/* TTY Global Definitions */
271
272#define NR_PORTS 4
273#define WAN_TTY_MAJOR 226
274#define WAN_TTY_MINOR 0
275
276#define WAN_CARD(port) (tty_card_map[port])
277#define MIN_PORT 0
278#define MAX_PORT NR_PORTS-1
279
280#define CRC_LENGTH 2
281
282static int wanpipe_tty_init(sdla_t *card);
283static void wanpipe_tty_receive(sdla_t *, unsigned, unsigned int);
284static void wanpipe_tty_trigger_poll(sdla_t *card);
285
286static struct tty_driver serial_driver;
287static int tty_init_cnt=0;
288
289static struct serial_state rs_table[NR_PORTS];
290
291static char tty_driver_mode=WANOPT_TTY_SYNC;
292
293static char *opt_decode[] = {"NONE","CRTSCTS","XONXOFF-RX",
294 "CRTSCTS XONXOFF-RX","XONXOFF-TX",
295 "CRTSCTS XONXOFF-TX","CRTSCTS XONXOFF"};
296static char *p_decode[] = {"NONE","ODD","EVEN"};
297
298static void* tty_card_map[NR_PORTS] = {NULL,NULL,NULL,NULL};
299
300
301/****** Public Functions ****************************************************/
302
303/*============================================================================
304 * Cisco HDLC protocol initialization routine.
305 *
306 * This routine is called by the main WANPIPE module during setup. At this
307 * point adapter is completely initialized and firmware is running.
308 * o read firmware version (to make sure it's alive)
309 * o configure adapter
310 * o initialize protocol-specific fields of the adapter data space.
311 *
312 * Return: 0 o.k.
313 * < 0 failure.
314 */
315int wpc_init (sdla_t* card, wandev_conf_t* conf)
316{
317 unsigned char port_num;
318 int err;
319 unsigned long max_permitted_baud = 0;
320 SHARED_MEMORY_INFO_STRUCT *flags;
321
322 union
323 {
324 char str[80];
325 } u;
326 volatile CHDLC_MAILBOX_STRUCT* mb;
327 CHDLC_MAILBOX_STRUCT* mb1;
328 unsigned long timeout;
329
330 /* Verify configuration ID */
331 if (conf->config_id != WANCONFIG_CHDLC) {
332 printk(KERN_INFO "%s: invalid configuration ID %u!\n",
333 card->devname, conf->config_id);
334 return -EINVAL;
335 }
336
337 /* Find out which Port to use */
338 if ((conf->comm_port == WANOPT_PRI) || (conf->comm_port == WANOPT_SEC)){
339 if (card->next){
340
341 if (conf->comm_port != card->next->u.c.comm_port){
342 card->u.c.comm_port = conf->comm_port;
343 }else{
344 printk(KERN_INFO "%s: ERROR - %s port used!\n",
345 card->wandev.name, PORT(conf->comm_port));
346 return -EINVAL;
347 }
348 }else{
349 card->u.c.comm_port = conf->comm_port;
350 }
351 }else{
352 printk(KERN_INFO "%s: ERROR - Invalid Port Selected!\n",
353 card->wandev.name);
354 return -EINVAL;
355 }
356
357
358 /* Initialize protocol-specific fields */
359 if(card->hw.type != SDLA_S514){
360
361 if (card->u.c.comm_port == WANOPT_PRI){
362 card->mbox = (void *) card->hw.dpmbase;
363 }else{
364 card->mbox = (void *) card->hw.dpmbase +
365 SEC_BASE_ADDR_MB_STRUCT - PRI_BASE_ADDR_MB_STRUCT;
366 }
367 }else{
368	/* for an S514 adapter, set a pointer to the actual mailbox in the */
369 /* allocated virtual memory area */
370 if (card->u.c.comm_port == WANOPT_PRI){
371 card->mbox = (void *) card->hw.dpmbase + PRI_BASE_ADDR_MB_STRUCT;
372 }else{
373 card->mbox = (void *) card->hw.dpmbase + SEC_BASE_ADDR_MB_STRUCT;
374 }
375 }
376
377 mb = mb1 = card->mbox;
378
379 if (!card->configured){
380
381 /* The board will place an 'I' in the return code to indicate that it is
382 ready to accept commands. We expect this to be completed in less
383 than 1 second. */
384
385 timeout = jiffies;
386 while (mb->return_code != 'I') /* Wait 1s for board to initialize */
387 if ((jiffies - timeout) > 1*HZ) break;
388
389 if (mb->return_code != 'I') {
390 printk(KERN_INFO
391 "%s: Initialization not completed by adapter\n",
392 card->devname);
393 printk(KERN_INFO "Please contact Sangoma representative.\n");
394 return -EIO;
395 }
396 }
397
398 /* Read firmware version. Note that when adapter initializes, it
399 * clears the mailbox, so it may appear that the first command was
400 * executed successfully when in fact it was merely erased. To work
401 * around this, we execute the first command twice.
402 */
403
404 if (chdlc_read_version(card, u.str))
405 return -EIO;
406
407 printk(KERN_INFO "%s: Running Cisco HDLC firmware v%s\n",
408 card->devname, u.str);
409
410 card->isr = &wpc_isr;
411 card->poll = NULL;
412 card->exec = NULL;
413 card->wandev.update = &update;
414 card->wandev.new_if = &new_if;
415 card->wandev.del_if = NULL;
416 card->wandev.udp_port = conf->udp_port;
417 card->disable_comm = &disable_comm;
418 card->wandev.new_if_cnt = 0;
419
420 /* reset the number of times the 'update()' proc has been called */
421 card->u.c.update_call_count = 0;
422
423 card->wandev.ttl = conf->ttl;
424 card->wandev.interface = conf->interface;
425
426 if ((card->u.c.comm_port == WANOPT_SEC && conf->interface == WANOPT_V35)&&
427 card->hw.type != SDLA_S514){
428		printk(KERN_INFO "%s: ERROR - V35 Interface not supported on S508 %s port\n",
429 card->devname, PORT(card->u.c.comm_port));
430 return -EIO;
431 }
432
433 card->wandev.clocking = conf->clocking;
434
435 port_num = card->u.c.comm_port;
436
437 /* in API mode, we can configure for "receive only" buffering */
438 if(card->hw.type == SDLA_S514) {
439 card->u.c.receive_only = conf->receive_only;
440 if(conf->receive_only) {
441 printk(KERN_INFO
442 "%s: Configured for 'receive only' mode\n",
443 card->devname);
444 }
445 }
446
447 /* Setup Port Bps */
448
449 if(card->wandev.clocking) {
450 if((port_num == WANOPT_PRI) || card->u.c.receive_only) {
451 /* For Primary Port 0 */
452 max_permitted_baud =
453 (card->hw.type == SDLA_S514) ?
454 PRI_MAX_BAUD_RATE_S514 :
455 PRI_MAX_BAUD_RATE_S508;
456
457 }else if(port_num == WANOPT_SEC) {
458 /* For Secondary Port 1 */
459 max_permitted_baud =
460 (card->hw.type == SDLA_S514) ?
461 SEC_MAX_BAUD_RATE_S514 :
462 SEC_MAX_BAUD_RATE_S508;
463 }
464
465 if(conf->bps > max_permitted_baud) {
466 conf->bps = max_permitted_baud;
467 printk(KERN_INFO "%s: Baud too high!\n",
468 card->wandev.name);
469 printk(KERN_INFO "%s: Baud rate set to %lu bps\n",
470 card->wandev.name, max_permitted_baud);
471 }
472 card->wandev.bps = conf->bps;
473 }else{
474 card->wandev.bps = 0;
475 }
476
477 /* Setup the Port MTU */
478 if((port_num == WANOPT_PRI) || card->u.c.receive_only) {
479
480 /* For Primary Port 0 */
481 card->wandev.mtu =
482 (conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
483 min_t(unsigned int, conf->mtu, PRI_MAX_NO_DATA_BYTES_IN_FRAME) :
484 CHDLC_DFLT_DATA_LEN;
485 } else if(port_num == WANOPT_SEC) {
486 /* For Secondary Port 1 */
487 card->wandev.mtu =
488 (conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
489 min_t(unsigned int, conf->mtu, SEC_MAX_NO_DATA_BYTES_IN_FRAME) :
490 CHDLC_DFLT_DATA_LEN;
491 }
492
493 /* Set up the interrupt status area */
494 /* Read the CHDLC Configuration and obtain:
495	 *	Ptr to shared memory info struct
496 * Use this pointer to calculate the value of card->u.c.flags !
497 */
498 mb1->buffer_length = 0;
499 mb1->command = READ_CHDLC_CONFIGURATION;
500 err = sdla_exec(mb1) ? mb1->return_code : CMD_TIMEOUT;
501 if(err != COMMAND_OK) {
502 if(card->hw.type != SDLA_S514)
503 enable_irq(card->hw.irq);
504
505 chdlc_error(card, err, mb1);
506 return -EIO;
507 }
508
509 if(card->hw.type == SDLA_S514){
510 card->u.c.flags = (void *)(card->hw.dpmbase +
511 (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
512 ptr_shared_mem_info_struct));
513 }else{
514 card->u.c.flags = (void *)(card->hw.dpmbase +
515 (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
516 ptr_shared_mem_info_struct % SDLA_WINDOWSIZE));
517 }
518
519 flags = card->u.c.flags;
520
521	/* This is for the port's link state */
522 card->wandev.state = WAN_DUALPORT;
523 card->u.c.state = WAN_DISCONNECTED;
524
525
526 if (!card->wandev.piggyback){
527 int err;
528
529 /* Perform interrupt testing */
530 err = intr_test(card);
531
532 if(err || (Intr_test_counter < MAX_INTR_TEST_COUNTER)) {
533 printk(KERN_INFO "%s: Interrupt test failed (%i)\n",
534 card->devname, Intr_test_counter);
535 printk(KERN_INFO "%s: Please choose another interrupt\n",
536 card->devname);
537 return -EIO;
538 }
539
540 printk(KERN_INFO "%s: Interrupt test passed (%i)\n",
541 card->devname, Intr_test_counter);
542 card->configured = 1;
543 }
544
545 if ((card->tty_opt=conf->tty) == WANOPT_YES){
546 int err;
547 card->tty_minor = conf->tty_minor;
548
549 /* On ASYNC connections internal clocking
550 * is mandatory */
551 if ((card->u.c.async_mode = conf->tty_mode)){
552 card->wandev.clocking = 1;
553 }
554 err=wanpipe_tty_init(card);
555 if (err){
556 return err;
557 }
558 }else{
559
560
561 if (chdlc_set_intr_mode(card, APP_INT_ON_TIMER)){
562 printk (KERN_INFO "%s: "
563 "Failed to set interrupt triggers!\n",
564 card->devname);
565 return -EIO;
566 }
567
568 /* Mask the Timer interrupt */
569 flags->interrupt_info_struct.interrupt_permission &=
570 ~APP_INT_ON_TIMER;
571 }
572
573 /* If we are using CHDLC in backup mode, this flag will
574 * indicate not to look for IP addresses in config_chdlc()*/
575 card->u.c.backup = conf->backup;
576
577 printk(KERN_INFO "\n");
578
579 return 0;
580}
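
/*
 * Illustrative sketch, not part of the original driver: the one-second
 * wait for the adapter's 'I' ready marker in wpc_init() above, rewritten
 * with the kernel's wrap-safe time_after() helper (<linux/jiffies.h>)
 * instead of the open-coded jiffies subtraction.  The helper name is
 * hypothetical and the function is not referenced anywhere.
 */
static inline int chdlc_wait_board_ready(volatile CHDLC_MAILBOX_STRUCT *mb)
{
	unsigned long expire = jiffies + HZ;	/* give the board 1 second */

	while (mb->return_code != 'I') {
		if (time_after(jiffies, expire))
			return -EIO;		/* board never signalled ready */
	}
	return 0;
}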
581
582/******* WAN Device Driver Entry Points *************************************/
583
584/*============================================================================
585 * Update device status & statistics
586 * This procedure is called when updating the PROC file system and returns
587 * various communications statistics. These statistics are accumulated from 3
588 * different locations:
589 * 1) The 'if_stats' recorded for the device.
590 * 2) Communication error statistics on the adapter.
591 * 3) CHDLC operational statistics on the adapter.
592 * The board level statistics are read during a timer interrupt. Note that we
593 * read the error and operational statistics during consecitive timer ticks so
594 * as to minimize the time that we are inside the interrupt handler.
595 *
596 */
597static int update(struct wan_device* wandev)
598{
599 sdla_t* card = wandev->private;
600 struct net_device* dev;
601 volatile chdlc_private_area_t* chdlc_priv_area;
602 SHARED_MEMORY_INFO_STRUCT *flags;
603 unsigned long timeout;
604
605 /* sanity checks */
606 if((wandev == NULL) || (wandev->private == NULL))
607 return -EFAULT;
608
609 if(wandev->state == WAN_UNCONFIGURED)
610 return -ENODEV;
611
612 /* more sanity checks */
613 if(!card->u.c.flags)
614 return -ENODEV;
615
616 if(test_bit(PERI_CRIT, (void*)&card->wandev.critical))
617 return -EAGAIN;
618
619 if((dev=card->wandev.dev) == NULL)
620 return -ENODEV;
621
622 if((chdlc_priv_area=dev->priv) == NULL)
623 return -ENODEV;
624
625 flags = card->u.c.flags;
626 if(chdlc_priv_area->update_comms_stats){
627 return -EAGAIN;
628 }
629
630 /* we will need 2 timer interrupts to complete the */
631 /* reading of the statistics */
632 chdlc_priv_area->update_comms_stats = 2;
633 flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
634 chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UPDATE;
635
636 /* wait a maximum of 1 second for the statistics to be updated */
637 timeout = jiffies;
638 for(;;) {
639 if(chdlc_priv_area->update_comms_stats == 0)
640 break;
641 if ((jiffies - timeout) > (1 * HZ)){
642 chdlc_priv_area->update_comms_stats = 0;
643 chdlc_priv_area->timer_int_enabled &=
644 ~TMR_INT_ENABLED_UPDATE;
645 return -EAGAIN;
646 }
647 }
648
649 return 0;
650}
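
/*
 * Note added for clarity (describes update() above and timer_intr()
 * below): update() and the timer interrupt cooperate to read the board
 * statistics in two steps.  update() sets update_comms_stats to 2,
 * unmasks APP_INT_ON_TIMER and busy-waits for at most one second.  On
 * the first timer tick update_comms_stats() reads the communication
 * error statistics, on the second it reads the operational statistics;
 * timer_intr() decrements the counter after each step, and update()
 * returns once it reaches zero.
 */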
651
652
653/*============================================================================
654 * Create new logical channel.
655 * This routine is called by the router when ROUTER_IFNEW IOCTL is being
656 * handled.
657 * o parse media- and hardware-specific configuration
658 * o make sure that a new channel can be created
659 * o allocate resources, if necessary
660 * o prepare network device structure for registration.
661 *
662 * Return: 0 o.k.
663 * < 0 failure (channel will not be created)
664 */
665static int new_if(struct wan_device* wandev, struct net_device* dev,
666 wanif_conf_t* conf)
667{
668 sdla_t* card = wandev->private;
669 chdlc_private_area_t* chdlc_priv_area;
670
671
672 printk(KERN_INFO "%s: Configuring Interface: %s\n",
673 card->devname, conf->name);
674
675 if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)) {
676 printk(KERN_INFO "%s: Invalid interface name!\n",
677 card->devname);
678 return -EINVAL;
679 }
680
681 /* allocate and initialize private data */
682 chdlc_priv_area = kmalloc(sizeof(chdlc_private_area_t), GFP_KERNEL);
683
684 if(chdlc_priv_area == NULL)
685 return -ENOMEM;
686
687 memset(chdlc_priv_area, 0, sizeof(chdlc_private_area_t));
688
689 chdlc_priv_area->card = card;
690 chdlc_priv_area->common.sk = NULL;
691 chdlc_priv_area->common.func = NULL;
692
693 /* initialize data */
694 strcpy(card->u.c.if_name, conf->name);
695
696 if(card->wandev.new_if_cnt > 0) {
697 kfree(chdlc_priv_area);
698 return -EEXIST;
699 }
700
701 card->wandev.new_if_cnt++;
702
703 chdlc_priv_area->TracingEnabled = 0;
704 chdlc_priv_area->route_status = NO_ROUTE;
705 chdlc_priv_area->route_removed = 0;
706
707 card->u.c.async_mode = conf->async_mode;
708
709 /* setup for asynchronous mode */
710 if(conf->async_mode) {
711 printk(KERN_INFO "%s: Configuring for asynchronous mode\n",
712 wandev->name);
713
714 if(card->u.c.comm_port == WANOPT_PRI) {
715 printk(KERN_INFO
716				"%s: Asynchronous mode on secondary port only\n",
717 wandev->name);
718 kfree(chdlc_priv_area);
719 return -EINVAL;
720 }
721
722 if(strcmp(conf->usedby, "WANPIPE") == 0) {
723 printk(KERN_INFO
724			"%s: Running in WANPIPE Async Mode\n", wandev->name);
725 card->u.c.usedby = WANPIPE;
726 }else{
727 card->u.c.usedby = API;
728 }
729
730 if(!card->wandev.clocking) {
731 printk(KERN_INFO
732 "%s: Asynch. clocking must be 'Internal'\n",
733 wandev->name);
734 kfree(chdlc_priv_area);
735 return -EINVAL;
736 }
737
738 if((card->wandev.bps < MIN_ASY_BAUD_RATE) ||
739 (card->wandev.bps > MAX_ASY_BAUD_RATE)) {
740 printk(KERN_INFO "%s: Selected baud rate is invalid.\n",
741 wandev->name);
742 printk(KERN_INFO "Must be between %u and %u bps.\n",
743 MIN_ASY_BAUD_RATE, MAX_ASY_BAUD_RATE);
744 kfree(chdlc_priv_area);
745 return -EINVAL;
746 }
747
748 card->u.c.api_options = 0;
749 if (conf->asy_data_trans == WANOPT_YES) {
750 card->u.c.api_options |= ASY_RX_DATA_TRANSPARENT;
751 }
752
753 card->u.c.protocol_options = 0;
754 if (conf->rts_hs_for_receive == WANOPT_YES) {
755 card->u.c.protocol_options |= ASY_RTS_HS_FOR_RX;
756 }
757 if (conf->xon_xoff_hs_for_receive == WANOPT_YES) {
758 card->u.c.protocol_options |= ASY_XON_XOFF_HS_FOR_RX;
759 }
760 if (conf->xon_xoff_hs_for_transmit == WANOPT_YES) {
761 card->u.c.protocol_options |= ASY_XON_XOFF_HS_FOR_TX;
762 }
763 if (conf->dcd_hs_for_transmit == WANOPT_YES) {
764 card->u.c.protocol_options |= ASY_DCD_HS_FOR_TX;
765 }
766 if (conf->cts_hs_for_transmit == WANOPT_YES) {
767 card->u.c.protocol_options |= ASY_CTS_HS_FOR_TX;
768 }
769
770 card->u.c.tx_bits_per_char = conf->tx_bits_per_char;
771 card->u.c.rx_bits_per_char = conf->rx_bits_per_char;
772 card->u.c.stop_bits = conf->stop_bits;
773 card->u.c.parity = conf->parity;
774 card->u.c.break_timer = conf->break_timer;
775 card->u.c.inter_char_timer = conf->inter_char_timer;
776 card->u.c.rx_complete_length = conf->rx_complete_length;
777 card->u.c.xon_char = conf->xon_char;
778
779 } else { /* setup for synchronous mode */
780
781 card->u.c.protocol_options = 0;
782 if (conf->ignore_dcd == WANOPT_YES){
783 card->u.c.protocol_options |= IGNORE_DCD_FOR_LINK_STAT;
784 }
785 if (conf->ignore_cts == WANOPT_YES){
786 card->u.c.protocol_options |= IGNORE_CTS_FOR_LINK_STAT;
787 }
788
789 if (conf->ignore_keepalive == WANOPT_YES) {
790 card->u.c.protocol_options |=
791 IGNORE_KPALV_FOR_LINK_STAT;
792 card->u.c.kpalv_tx = MIN_Tx_KPALV_TIMER;
793 card->u.c.kpalv_rx = MIN_Rx_KPALV_TIMER;
794 card->u.c.kpalv_err = MIN_KPALV_ERR_TOL;
795
796 } else { /* Do not ignore keepalives */
797 card->u.c.kpalv_tx =
798 ((conf->keepalive_tx_tmr - MIN_Tx_KPALV_TIMER)
799 >= 0) ?
800 min_t(unsigned int, conf->keepalive_tx_tmr,MAX_Tx_KPALV_TIMER) :
801 DEFAULT_Tx_KPALV_TIMER;
802
803 card->u.c.kpalv_rx =
804 ((conf->keepalive_rx_tmr - MIN_Rx_KPALV_TIMER)
805 >= 0) ?
806 min_t(unsigned int, conf->keepalive_rx_tmr,MAX_Rx_KPALV_TIMER) :
807 DEFAULT_Rx_KPALV_TIMER;
808
809 card->u.c.kpalv_err =
810 ((conf->keepalive_err_margin-MIN_KPALV_ERR_TOL)
811 >= 0) ?
812 min_t(unsigned int, conf->keepalive_err_margin,
813 MAX_KPALV_ERR_TOL) :
814 DEFAULT_KPALV_ERR_TOL;
815 }
816
817 /* Setup slarp timer to control delay between slarps */
818 card->u.c.slarp_timer =
819 ((conf->slarp_timer - MIN_SLARP_REQ_TIMER) >= 0) ?
820 min_t(unsigned int, conf->slarp_timer, MAX_SLARP_REQ_TIMER) :
821 DEFAULT_SLARP_REQ_TIMER;
822
823 if (conf->hdlc_streaming == WANOPT_YES) {
824 printk(KERN_INFO "%s: Enabling HDLC STREAMING Mode\n",
825 wandev->name);
826 card->u.c.protocol_options = HDLC_STREAMING_MODE;
827 }
828
829 if ((chdlc_priv_area->true_if_encoding = conf->true_if_encoding) == WANOPT_YES){
830 printk(KERN_INFO
831				"%s: Enabling true interface type encoding.\n",
832 card->devname);
833 }
834
835 /* Setup wanpipe as a router (WANPIPE) or as an API */
836 if( strcmp(conf->usedby, "WANPIPE") == 0) {
837
838 printk(KERN_INFO "%s: Running in WANPIPE mode!\n",
839 wandev->name);
840 card->u.c.usedby = WANPIPE;
841
842 /* Option to bring down the interface when
843 * the link goes down */
844 if (conf->if_down){
845 set_bit(DYN_OPT_ON,&chdlc_priv_area->interface_down);
846 printk(KERN_INFO
847 "%s: Dynamic interface configuration enabled\n",
848 card->devname);
849 }
850
851 } else if( strcmp(conf->usedby, "API") == 0) {
852 card->u.c.usedby = API;
853			printk(KERN_INFO "%s: Running in API mode!\n",
854 wandev->name);
855 }
856 }
857
858	/* Tells us whether this interface is a
859	 * gateway or not */
860 if ((chdlc_priv_area->gateway = conf->gateway) == WANOPT_YES){
861 printk(KERN_INFO "%s: Interface %s is set as a gateway.\n",
862 card->devname,card->u.c.if_name);
863 }
864
865 /* Get Multicast Information */
866 chdlc_priv_area->mc = conf->mc;
867
868 /* prepare network device data space for registration */
869 strcpy(dev->name,card->u.c.if_name);
870
871 dev->init = &if_init;
872 dev->priv = chdlc_priv_area;
873
874 /* Initialize the polling work routine */
875 INIT_WORK(&chdlc_priv_area->poll_work, (void*)(void*)chdlc_poll, dev);
876
877 /* Initialize the polling delay timer */
878 init_timer(&chdlc_priv_area->poll_delay_timer);
879 chdlc_priv_area->poll_delay_timer.data = (unsigned long)dev;
880 chdlc_priv_area->poll_delay_timer.function = chdlc_poll_delay;
881
882 printk(KERN_INFO "\n");
883
884 return 0;
885}
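
/*
 * Illustrative helper, hypothetical and unused: the keepalive and SLARP
 * timer setup in new_if() above repeats a single pattern - a configured
 * value below its MIN_* limit falls back to the DEFAULT_* value, and
 * anything else is capped at MAX_* with min_t().  Written out once:
 */
static inline unsigned int chdlc_clamp_or_default(unsigned int val,
						  unsigned int min_val,
						  unsigned int max_val,
						  unsigned int dflt)
{
	return (val >= min_val) ? min_t(unsigned int, val, max_val) : dflt;
}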
886
887
888/****** Network Device Interface ********************************************/
889
890/*============================================================================
891 * Initialize Linux network interface.
892 *
893 * This routine is called only once for each interface, during Linux network
894 * interface registration. Returning anything but zero will fail interface
895 * registration.
896 */
897static int if_init(struct net_device* dev)
898{
899 chdlc_private_area_t* chdlc_priv_area = dev->priv;
900 sdla_t* card = chdlc_priv_area->card;
901 struct wan_device* wandev = &card->wandev;
902
903 /* Initialize device driver entry points */
904 dev->open = &if_open;
905 dev->stop = &if_close;
906 dev->hard_header = &if_header;
907 dev->rebuild_header = &if_rebuild_hdr;
908 dev->hard_start_xmit = &if_send;
909 dev->get_stats = &if_stats;
910 dev->tx_timeout = &if_tx_timeout;
911 dev->watchdog_timeo = TX_TIMEOUT;
912
913 /* Initialize media-specific parameters */
914 dev->flags |= IFF_POINTOPOINT;
915 dev->flags |= IFF_NOARP;
916
917	/* Enable Multicasting if user selected */
918 if (chdlc_priv_area->mc == WANOPT_YES){
919 dev->flags |= IFF_MULTICAST;
920 }
921
922 if (chdlc_priv_area->true_if_encoding){
923 dev->type = ARPHRD_HDLC; /* This breaks the tcpdump */
924 }else{
925 dev->type = ARPHRD_PPP;
926 }
927
928 dev->mtu = card->wandev.mtu;
929 /* for API usage, add the API header size to the requested MTU size */
930 if(card->u.c.usedby == API) {
931 dev->mtu += sizeof(api_tx_hdr_t);
932 }
933
934 dev->hard_header_len = CHDLC_HDR_LEN;
935
936 /* Initialize hardware parameters */
937 dev->irq = wandev->irq;
938 dev->dma = wandev->dma;
939 dev->base_addr = wandev->ioport;
940 dev->mem_start = wandev->maddr;
941 dev->mem_end = wandev->maddr + wandev->msize - 1;
942
943 /* Set transmit buffer queue length
944	 * If it is too low, packets will not be retransmitted
945	 * by the stack.
946 */
947 dev->tx_queue_len = 100;
948 SET_MODULE_OWNER(dev);
949
950 return 0;
951}
952
953/*============================================================================
954 * Open network interface.
955 * o enable communications and interrupts.
956 * o prevent module from unloading by incrementing use count
957 *
958 * Return 0 if O.k. or errno.
959 */
960static int if_open(struct net_device* dev)
961{
962 chdlc_private_area_t* chdlc_priv_area = dev->priv;
963 sdla_t* card = chdlc_priv_area->card;
964 struct timeval tv;
965 int err = 0;
966
967 /* Only one open per interface is allowed */
968
969 if (netif_running(dev))
970 return -EBUSY;
971
972 /* Initialize the work queue entry */
973 chdlc_priv_area->tq_working=0;
974
975 INIT_WORK(&chdlc_priv_area->common.wanpipe_work,
976 (void *)(void *)chdlc_work, dev);
977
978 /* Allocate and initialize BH circular buffer */
979	/* Add 1 to MAX_BH_BUFF so we don't have to test against (MAX_BH_BUFF-1) */
980	chdlc_priv_area->bh_head = kmalloc((sizeof(bh_data_t)*(MAX_BH_BUFF+1)),GFP_ATOMIC);
	if (chdlc_priv_area->bh_head == NULL)
		return -ENOMEM;
981	memset(chdlc_priv_area->bh_head,0,(sizeof(bh_data_t)*(MAX_BH_BUFF+1)));
982 atomic_set(&chdlc_priv_area->bh_buff_used, 0);
983
984 do_gettimeofday(&tv);
985 chdlc_priv_area->router_start_time = tv.tv_sec;
986
987 netif_start_queue(dev);
988
989 wanpipe_open(card);
990
991 /* TTY is configured during wanpipe_set_termios
992 * call, not here */
993 if (card->tty_opt)
994 return err;
995
996 set_bit(0,&chdlc_priv_area->config_chdlc);
997 chdlc_priv_area->config_chdlc_timeout=jiffies;
998
999 /* Start the CHDLC configuration after 1sec delay.
1000	 * This will give the interface initialization time
1001 * to finish its configuration */
1002 mod_timer(&chdlc_priv_area->poll_delay_timer, jiffies + HZ);
1003 return err;
1004}
1005
1006/*============================================================================
1007 * Close network interface.
1008 * o if this is the last close, then disable communications and interrupts.
1009 * o reset flags.
1010 */
1011static int if_close(struct net_device* dev)
1012{
1013 chdlc_private_area_t* chdlc_priv_area = dev->priv;
1014 sdla_t* card = chdlc_priv_area->card;
1015
1016 if (chdlc_priv_area->bh_head){
1017 int i;
1018 struct sk_buff *skb;
1019
1020 for (i=0; i<(MAX_BH_BUFF+1); i++){
1021 skb = ((bh_data_t *)&chdlc_priv_area->bh_head[i])->skb;
1022 if (skb != NULL){
1023 dev_kfree_skb_any(skb);
1024 }
1025 }
1026 kfree(chdlc_priv_area->bh_head);
1027 chdlc_priv_area->bh_head=NULL;
1028 }
1029
1030 netif_stop_queue(dev);
1031 wanpipe_close(card);
1032 del_timer(&chdlc_priv_area->poll_delay_timer);
1033 return 0;
1034}
1035
1036static void disable_comm (sdla_t *card)
1037{
1038 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
1039
1040 if (card->u.c.comm_enabled){
1041 chdlc_disable_comm_shutdown (card);
1042 }else{
1043 flags->interrupt_info_struct.interrupt_permission = 0;
1044 }
1045
1046 if (!tty_init_cnt)
1047 return;
1048
1049 if (card->tty_opt){
1050 struct serial_state * state;
1051 if (!(--tty_init_cnt)){
1052 int e1;
1053 serial_driver.refcount=0;
1054
1055 if ((e1 = tty_unregister_driver(&serial_driver)))
1056 printk("SERIAL: failed to unregister serial driver (%d)\n",
1057 e1);
1058 printk(KERN_INFO "%s: Unregistering TTY Driver, Major %i\n",
1059 card->devname,WAN_TTY_MAJOR);
1060 }
1061 card->tty=NULL;
1062 tty_card_map[card->tty_minor]=NULL;
1063 state = &rs_table[card->tty_minor];
1064 memset(state, 0, sizeof(*state));
1065 }
1066 return;
1067}
1068
1069
1070/*============================================================================
1071 * Build media header.
1072 *
1073 * The trick here is to put packet type (Ethertype) into 'protocol' field of
1074 * the socket buffer, so that we don't forget it. If packet type is not
1075 * supported, set skb->protocol to 0 and discard packet later.
1076 *
1077 * Return: media header length.
1078 */
1079static int if_header(struct sk_buff* skb, struct net_device* dev,
1080 unsigned short type, void* daddr, void* saddr,
1081 unsigned len)
1082{
1083 skb->protocol = htons(type);
1084
1085 return CHDLC_HDR_LEN;
1086}
1087
1088
1089/*============================================================================
1090 * Handle transmit timeout event from netif watchdog
1091 */
1092static void if_tx_timeout(struct net_device *dev)
1093{
1094 chdlc_private_area_t* chan = dev->priv;
1095 sdla_t *card = chan->card;
1096
1097 /* If our device stays busy for at least 5 seconds then we will
1098	 * kick start the device by waking the transmit queue. We expect
1099 * that our device never stays busy more than 5 seconds. So this
1100 * is only used as a last resort.
1101 */
1102
1103 ++card->wandev.stats.collisions;
1104
1105 printk (KERN_INFO "%s: Transmit timed out on %s\n", card->devname,dev->name);
1106 netif_wake_queue (dev);
1107}
1108
1109
1110
1111/*============================================================================
1112 * Re-build media header.
1113 *
1114 * Return: 1 physical address resolved.
1115 * 0 physical address not resolved
1116 */
1117static int if_rebuild_hdr (struct sk_buff *skb)
1118{
1119 return 1;
1120}
1121
1122
1123/*============================================================================
1124 * Send a packet on a network interface.
1125 * o set tbusy flag (marks start of the transmission) to block a timer-based
1126 * transmit from overlapping.
1127 * o check link state. If link is not up, then drop the packet.
1128 * o execute adapter send command.
1129 * o free socket buffer
1130 *
1131 * Return: 0 complete (socket buffer must be freed)
1132 * non-0 packet may be re-transmitted (tbusy must be set)
1133 *
1134 * Notes:
1135 * 1. This routine is called either by the protocol stack or by the "net
1136 * bottom half" (with interrupts enabled).
1137 * 2. Setting tbusy flag will inhibit further transmit requests from the
1138 * protocol stack and can be used for flow control with protocol layer.
1139 */
1140static int if_send(struct sk_buff* skb, struct net_device* dev)
1141{
1142 chdlc_private_area_t *chdlc_priv_area = dev->priv;
1143 sdla_t *card = chdlc_priv_area->card;
1144 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
1145 INTERRUPT_INFORMATION_STRUCT *chdlc_int = &flags->interrupt_info_struct;
1146 int udp_type = 0;
1147 unsigned long smp_flags;
1148 int err=0;
1149
1150 netif_stop_queue(dev);
1151
1152 if (skb == NULL){
1153		/* If we get here, some higher layer thinks we've missed a
1154 * tx-done interrupt.
1155 */
1156 printk(KERN_INFO "%s: interface %s got kicked!\n",
1157 card->devname, dev->name);
1158
1159 netif_wake_queue(dev);
1160 return 0;
1161 }
1162
1163 if (ntohs(skb->protocol) != htons(PVC_PROT)){
1164
1165 /* check the udp packet type */
1166
1167 udp_type = udp_pkt_type(skb, card);
1168
1169 if (udp_type == UDP_CPIPE_TYPE){
1170 if(store_udp_mgmt_pkt(UDP_PKT_FRM_STACK, card, skb, dev,
1171 chdlc_priv_area)){
1172 chdlc_int->interrupt_permission |=
1173 APP_INT_ON_TIMER;
1174 }
1175 netif_start_queue(dev);
1176 return 0;
1177 }
1178
1179 /* check to see if the source IP address is a broadcast or */
1180 /* multicast IP address */
1181 if(chk_bcast_mcast_addr(card, dev, skb)){
1182 ++card->wandev.stats.tx_dropped;
1183 dev_kfree_skb_any(skb);
1184 netif_start_queue(dev);
1185 return 0;
1186 }
1187 }
1188
1189 /* Lock the 508 Card: SMP is supported */
1190 if(card->hw.type != SDLA_S514){
1191 s508_lock(card,&smp_flags);
1192 }
1193
1194 if(test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
1195
1196 printk(KERN_INFO "%s: Critical in if_send: %lx\n",
1197 card->wandev.name,card->wandev.critical);
1198 ++card->wandev.stats.tx_dropped;
1199 netif_start_queue(dev);
1200 goto if_send_exit_crit;
1201 }
1202
1203 if(card->u.c.state != WAN_CONNECTED){
1204 ++card->wandev.stats.tx_dropped;
1205 netif_start_queue(dev);
1206
1207 }else if(!skb->protocol){
1208 ++card->wandev.stats.tx_errors;
1209 netif_start_queue(dev);
1210
1211 }else {
1212 void* data = skb->data;
1213 unsigned len = skb->len;
1214 unsigned char attr;
1215
1216 /* If it's an API packet pull off the API
1217 * header. Also check that the packet size
1218 * is larger than the API header
1219 */
1220 if (card->u.c.usedby == API){
1221 api_tx_hdr_t* api_tx_hdr;
1222
1223 /* discard the frame if we are configured for */
1224 /* 'receive only' mode or if there is no data */
1225 if (card->u.c.receive_only ||
1226 (len <= sizeof(api_tx_hdr_t))) {
1227
1228 ++card->wandev.stats.tx_dropped;
1229 netif_start_queue(dev);
1230 goto if_send_exit_crit;
1231 }
1232
1233 api_tx_hdr = (api_tx_hdr_t *)data;
1234 attr = api_tx_hdr->attr;
1235 data += sizeof(api_tx_hdr_t);
1236 len -= sizeof(api_tx_hdr_t);
1237 }
1238
1239 if(chdlc_send(card, data, len)) {
1240 netif_stop_queue(dev);
1241 }else{
1242 ++card->wandev.stats.tx_packets;
1243 card->wandev.stats.tx_bytes += len;
1244
1245 netif_start_queue(dev);
1246
1247 dev->trans_start = jiffies;
1248 }
1249 }
1250
1251if_send_exit_crit:
1252
1253 if (!(err=netif_queue_stopped(dev))) {
1254 dev_kfree_skb_any(skb);
1255 }else{
1256 chdlc_priv_area->tick_counter = jiffies;
1257 chdlc_int->interrupt_permission |= APP_INT_ON_TX_FRAME;
1258 }
1259
1260 clear_bit(SEND_CRIT, (void*)&card->wandev.critical);
1261 if(card->hw.type != SDLA_S514){
1262 s508_unlock(card,&smp_flags);
1263 }
1264
1265 return err;
1266}
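
/*
 * Note added for clarity (summarizes the API path in if_send() above):
 * when the interface is used in API mode, every transmitted frame must
 * carry an api_tx_hdr_t in front of the payload.  if_send() drops frames
 * that are not larger than the header (or any frame when the card is in
 * 'receive only' mode), reads the attribute byte from the header and
 * then strips the header before handing the payload to chdlc_send().
 */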
1267
1268
1269/*============================================================================
1270 * Check to see if the packet to be transmitted contains a broadcast or
1271 * multicast source IP address.
1272 */
1273
1274static int chk_bcast_mcast_addr(sdla_t *card, struct net_device* dev,
1275 struct sk_buff *skb)
1276{
1277 u32 src_ip_addr;
1278 u32 broadcast_ip_addr = 0;
1279 struct in_device *in_dev;
1280
1281 /* read the IP source address from the outgoing packet */
1282 src_ip_addr = *(u32 *)(skb->data + 12);
1283
1284 /* read the IP broadcast address for the device */
1285 in_dev = dev->ip_ptr;
1286 if(in_dev != NULL) {
1287 struct in_ifaddr *ifa= in_dev->ifa_list;
1288 if(ifa != NULL)
1289 broadcast_ip_addr = ifa->ifa_broadcast;
1290 else
1291 return 0;
1292 }
1293
1294 /* check if the IP Source Address is a Broadcast address */
1295 if((dev->flags & IFF_BROADCAST) && (src_ip_addr == broadcast_ip_addr)) {
1296 printk(KERN_INFO "%s: Broadcast Source Address silently discarded\n",
1297 card->devname);
1298 return 1;
1299 }
1300
1301 /* check if the IP Source Address is a Multicast address */
1302 if((ntohl(src_ip_addr) >= 0xE0000001) &&
1303 (ntohl(src_ip_addr) <= 0xFFFFFFFE)) {
1304 printk(KERN_INFO "%s: Multicast Source Address silently discarded\n",
1305 card->devname);
1306 return 1;
1307 }
1308
1309 return 0;
1310}
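
/*
 * Note added for clarity (describes chk_bcast_mcast_addr() above): the
 * hard-coded offset of 12 assumes skb->data starts directly with the
 * IPv4 header, so bytes 12..15 hold the source address.  The multicast
 * test covers 224.0.0.1 through 255.255.255.254, i.e. class D multicast
 * space plus the reserved class E range.
 */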
1311
1312
1313/*============================================================================
1314 * Reply to UDP Management system.
1315 * Return length of reply.
1316 */
1317static int reply_udp( unsigned char *data, unsigned int mbox_len )
1318{
1319
1320 unsigned short len, udp_length, temp, ip_length;
1321 unsigned long ip_temp;
1322 int even_bound = 0;
1323 chdlc_udp_pkt_t *c_udp_pkt = (chdlc_udp_pkt_t *)data;
1324
1325 /* Set length of packet */
1326 len = sizeof(ip_pkt_t)+
1327 sizeof(udp_pkt_t)+
1328 sizeof(wp_mgmt_t)+
1329 sizeof(cblock_t)+
1330 sizeof(trace_info_t)+
1331 mbox_len;
1332
1333 /* fill in UDP reply */
1334 c_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
1335
1336 /* fill in UDP length */
1337 udp_length = sizeof(udp_pkt_t)+
1338 sizeof(wp_mgmt_t)+
1339 sizeof(cblock_t)+
1340 sizeof(trace_info_t)+
1341 mbox_len;
1342
1343 /* put it on an even boundary */
1344 if ( udp_length & 0x0001 ) {
1345 udp_length += 1;
1346 len += 1;
1347 even_bound = 1;
1348 }
1349
1350 temp = (udp_length<<8)|(udp_length>>8);
1351 c_udp_pkt->udp_pkt.udp_length = temp;
1352
1353 /* swap UDP ports */
1354 temp = c_udp_pkt->udp_pkt.udp_src_port;
1355 c_udp_pkt->udp_pkt.udp_src_port =
1356 c_udp_pkt->udp_pkt.udp_dst_port;
1357 c_udp_pkt->udp_pkt.udp_dst_port = temp;
1358
1359 /* add UDP pseudo header */
1360 temp = 0x1100;
1361 *((unsigned short *)(c_udp_pkt->data+mbox_len+even_bound)) = temp;
1362 temp = (udp_length<<8)|(udp_length>>8);
1363 *((unsigned short *)(c_udp_pkt->data+mbox_len+even_bound+2)) = temp;
1364
1365
1366 /* calculate UDP checksum */
1367 c_udp_pkt->udp_pkt.udp_checksum = 0;
1368 c_udp_pkt->udp_pkt.udp_checksum = calc_checksum(&data[UDP_OFFSET],udp_length+UDP_OFFSET);
1369
1370 /* fill in IP length */
1371 ip_length = len;
1372 temp = (ip_length<<8)|(ip_length>>8);
1373 c_udp_pkt->ip_pkt.total_length = temp;
1374
1375 /* swap IP addresses */
1376 ip_temp = c_udp_pkt->ip_pkt.ip_src_address;
1377 c_udp_pkt->ip_pkt.ip_src_address = c_udp_pkt->ip_pkt.ip_dst_address;
1378 c_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
1379
1380 /* fill in IP checksum */
1381 c_udp_pkt->ip_pkt.hdr_checksum = 0;
1382 c_udp_pkt->ip_pkt.hdr_checksum = calc_checksum(data,sizeof(ip_pkt_t));
1383
1384 return len;
1385
1386} /* reply_udp */
1387
1388static unsigned short calc_checksum (char *data, int len)
1389{
1390 unsigned short temp;
1391 unsigned long sum=0;
1392 int i;
1393
1394 for( i = 0; i <len; i+=2 ) {
1395 memcpy(&temp,&data[i],2);
1396 sum += (unsigned long)temp;
1397 }
1398
1399 while (sum >> 16 ) {
1400 sum = (sum & 0xffffUL) + (sum >> 16);
1401 }
1402
1403 temp = (unsigned short)sum;
1404 temp = ~temp;
1405
1406 if( temp == 0 )
1407 temp = 0xffff;
1408
1409 return temp;
1410}
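
/*
 * Illustrative sketch, hypothetical and unused: calc_checksum() above
 * computes the standard Internet (one's-complement) checksum over an
 * even number of bytes and remaps a result of 0 to 0xffff.  Because of
 * that remapping, re-running it over a buffer whose checksum field was
 * filled in by calc_checksum() should yield 0xffff:
 */
static inline int chdlc_checksum_ok(char *buf, int len)
{
	return calc_checksum(buf, len) == 0xffff;
}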
1411
1412
1413/*============================================================================
1414 * Get ethernet-style interface statistics.
1415 * Return a pointer to struct enet_statistics.
1416 */
1417static struct net_device_stats* if_stats(struct net_device* dev)
1418{
1419 sdla_t *my_card;
1420 chdlc_private_area_t* chdlc_priv_area;
1421
1422 if ((chdlc_priv_area=dev->priv) == NULL)
1423 return NULL;
1424
1425 my_card = chdlc_priv_area->card;
1426 return &my_card->wandev.stats;
1427}
1428
1429
1430/****** Cisco HDLC Firmware Interface Functions *******************************/
1431
1432/*============================================================================
1433 * Read firmware code version.
1434 * Put code version as ASCII string in str.
1435 */
1436static int chdlc_read_version (sdla_t* card, char* str)
1437{
1438 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1439 int len;
1440 char err;
1441 mb->buffer_length = 0;
1442 mb->command = READ_CHDLC_CODE_VERSION;
1443 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1444
1445 if(err != COMMAND_OK) {
1446 chdlc_error(card,err,mb);
1447 }
1448 else if (str) { /* is not null */
1449 len = mb->buffer_length;
1450 memcpy(str, mb->data, len);
1451 str[len] = '\0';
1452 }
1453 return (err);
1454}
1455
1456/*-----------------------------------------------------------------------------
1457 * Configure CHDLC firmware.
1458 */
1459static int chdlc_configure (sdla_t* card, void* data)
1460{
1461 int err;
1462 CHDLC_MAILBOX_STRUCT *mailbox = card->mbox;
1463 int data_length = sizeof(CHDLC_CONFIGURATION_STRUCT);
1464
1465 mailbox->buffer_length = data_length;
1466 memcpy(mailbox->data, data, data_length);
1467 mailbox->command = SET_CHDLC_CONFIGURATION;
1468 err = sdla_exec(mailbox) ? mailbox->return_code : CMD_TIMEOUT;
1469
1470 if (err != COMMAND_OK) chdlc_error (card, err, mailbox);
1471
1472 return err;
1473}
1474
1475
1476/*============================================================================
1477 * Set interrupt mode -- HDLC Version.
1478 */
1479
1480static int chdlc_set_intr_mode (sdla_t* card, unsigned mode)
1481{
1482 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1483 CHDLC_INT_TRIGGERS_STRUCT* int_data =
1484 (CHDLC_INT_TRIGGERS_STRUCT *)mb->data;
1485 int err;
1486
1487 int_data->CHDLC_interrupt_triggers = mode;
1488 int_data->IRQ = card->hw.irq;
1489 int_data->interrupt_timer = 1;
1490
1491 mb->buffer_length = sizeof(CHDLC_INT_TRIGGERS_STRUCT);
1492 mb->command = SET_CHDLC_INTERRUPT_TRIGGERS;
1493 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1494 if (err != COMMAND_OK)
1495 chdlc_error (card, err, mb);
1496 return err;
1497}
1498
1499
1500/*===========================================================
1501 * chdlc_disable_comm_shutdown
1502 *
1503 * Shutdown() disables the communications. We must
1504 * have a separate function, because we must not
1505 * call the chdlc_error() handler since the private
1506 * area has already been replaced */
1507
1508static int chdlc_disable_comm_shutdown (sdla_t *card)
1509{
1510 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1511 CHDLC_INT_TRIGGERS_STRUCT* int_data =
1512 (CHDLC_INT_TRIGGERS_STRUCT *)mb->data;
1513 int err;
1514
1515	/* Disable Interrupts */
1516 int_data->CHDLC_interrupt_triggers = 0;
1517 int_data->IRQ = card->hw.irq;
1518 int_data->interrupt_timer = 1;
1519
1520 mb->buffer_length = sizeof(CHDLC_INT_TRIGGERS_STRUCT);
1521 mb->command = SET_CHDLC_INTERRUPT_TRIGGERS;
1522 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1523
1524 /* Disable Communications */
1525
1526 if (card->u.c.async_mode) {
1527 mb->command = DISABLE_ASY_COMMUNICATIONS;
1528 }else{
1529 mb->command = DISABLE_CHDLC_COMMUNICATIONS;
1530 }
1531
1532 mb->buffer_length = 0;
1533 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1534
1535 card->u.c.comm_enabled = 0;
1536
1537 return 0;
1538}
1539
1540/*============================================================================
1541 * Enable communications.
1542 */
1543
1544static int chdlc_comm_enable (sdla_t* card)
1545{
1546 int err;
1547 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1548
1549 mb->buffer_length = 0;
1550 mb->command = ENABLE_CHDLC_COMMUNICATIONS;
1551 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1552 if (err != COMMAND_OK)
1553 chdlc_error(card, err, mb);
1554 else
1555 card->u.c.comm_enabled = 1;
1556
1557 return err;
1558}
1559
1560/*============================================================================
1561 * Read communication error statistics.
1562 */
1563static int chdlc_read_comm_err_stats (sdla_t* card)
1564{
1565 int err;
1566 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1567
1568 mb->buffer_length = 0;
1569 mb->command = READ_COMMS_ERROR_STATS;
1570 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1571 if (err != COMMAND_OK)
1572 chdlc_error(card,err,mb);
1573 return err;
1574}
1575
1576
1577/*============================================================================
1578 * Read CHDLC operational statistics.
1579 */
1580static int chdlc_read_op_stats (sdla_t* card)
1581{
1582 int err;
1583 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1584
1585 mb->buffer_length = 0;
1586 mb->command = READ_CHDLC_OPERATIONAL_STATS;
1587 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1588 if (err != COMMAND_OK)
1589 chdlc_error(card,err,mb);
1590 return err;
1591}
1592
1593
1594/*============================================================================
1595 * Update communications error and general packet statistics.
1596 */
1597static int update_comms_stats(sdla_t* card,
1598 chdlc_private_area_t* chdlc_priv_area)
1599{
1600 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1601 COMMS_ERROR_STATS_STRUCT* err_stats;
1602 CHDLC_OPERATIONAL_STATS_STRUCT *op_stats;
1603
1604 /* on the first timer interrupt, read the comms error statistics */
1605 if(chdlc_priv_area->update_comms_stats == 2) {
1606 if(chdlc_read_comm_err_stats(card))
1607 return 1;
1608 err_stats = (COMMS_ERROR_STATS_STRUCT *)mb->data;
1609 card->wandev.stats.rx_over_errors =
1610 err_stats->Rx_overrun_err_count;
1611 card->wandev.stats.rx_crc_errors =
1612 err_stats->CRC_err_count;
1613 card->wandev.stats.rx_frame_errors =
1614 err_stats->Rx_abort_count;
1615 card->wandev.stats.rx_fifo_errors =
1616 err_stats->Rx_dis_pri_bfrs_full_count;
1617 card->wandev.stats.rx_missed_errors =
1618 card->wandev.stats.rx_fifo_errors;
1619 card->wandev.stats.tx_aborted_errors =
1620 err_stats->sec_Tx_abort_count;
1621 }
1622
1623 /* on the second timer interrupt, read the operational statistics */
1624 else {
1625 if(chdlc_read_op_stats(card))
1626 return 1;
1627 op_stats = (CHDLC_OPERATIONAL_STATS_STRUCT *)mb->data;
1628 card->wandev.stats.rx_length_errors =
1629 (op_stats->Rx_Data_discard_short_count +
1630 op_stats->Rx_Data_discard_long_count);
1631 }
1632
1633 return 0;
1634}
1635
1636/*============================================================================
1637 * Send packet.
1638 * Return: 0 - o.k.
1639 * 1 - no transmit buffers available
1640 */
1641static int chdlc_send (sdla_t* card, void* data, unsigned len)
1642{
1643 CHDLC_DATA_TX_STATUS_EL_STRUCT *txbuf = card->u.c.txbuf;
1644
1645 if (txbuf->opp_flag)
1646 return 1;
1647
1648 sdla_poke(&card->hw, txbuf->ptr_data_bfr, data, len);
1649
1650 txbuf->frame_length = len;
1651 txbuf->opp_flag = 1; /* start transmission */
1652
1653 /* Update transmit buffer control fields */
1654 card->u.c.txbuf = ++txbuf;
1655
1656 if ((void*)txbuf > card->u.c.txbuf_last)
1657 card->u.c.txbuf = card->u.c.txbuf_base;
1658
1659 return 0;
1660}
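
/*
 * Note added for clarity (describes chdlc_send() above): opp_flag is the
 * ownership handshake with the firmware.  A non-zero opp_flag means the
 * firmware still owns the transmit buffer element, so the frame cannot
 * be queued and the caller must retry later (if_send() re-enables the
 * transmit-frame interrupt for that case).  Otherwise the data is copied
 * onto the card with sdla_poke(), the frame length is set, opp_flag is
 * raised to start transmission and the driver advances to the next
 * element of the circular transmit buffer ring.
 */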
1661
1662/****** Firmware Error Handler **********************************************/
1663
1664/*============================================================================
1665 * Firmware error handler.
1666 * This routine is called whenever a firmware command returns a non-zero
1667 * return code.
1668 *
1669 * Return zero if previous command has to be cancelled.
1670 */
1671static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb)
1672{
1673 unsigned cmd = mb->command;
1674
1675 switch (err) {
1676
1677 case CMD_TIMEOUT:
1678 printk(KERN_INFO "%s: command 0x%02X timed out!\n",
1679 card->devname, cmd);
1680 break;
1681
1682 case S514_BOTH_PORTS_SAME_CLK_MODE:
1683 if(cmd == SET_CHDLC_CONFIGURATION) {
1684 printk(KERN_INFO
1685 "%s: Configure both ports for the same clock source\n",
1686 card->devname);
1687 break;
1688 }
1689
1690 default:
1691 printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n",
1692 card->devname, cmd, err);
1693 }
1694
1695 return 0;
1696}
1697
1698
1699/********** Bottom Half Handlers ********************************************/
1700
1701/* NOTE: There is no API, BH support for Kernels lower than 2.2.X.
1702 * DO NOT INSERT ANY CODE HERE, NOTICE THE
1703 * PREPROCESSOR STATEMENT ABOVE, UNLESS YOU KNOW WHAT YOU ARE
1704 * DOING */
1705
1706static void chdlc_work(struct net_device * dev)
1707{
1708 chdlc_private_area_t* chan = dev->priv;
1709 sdla_t *card = chan->card;
1710 struct sk_buff *skb;
1711
1712 if (atomic_read(&chan->bh_buff_used) == 0){
1713 clear_bit(0, &chan->tq_working);
1714 return;
1715 }
1716
1717 while (atomic_read(&chan->bh_buff_used)){
1718
1719 skb = ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb;
1720
1721 if (skb != NULL){
1722
1723 if (chan->common.sk == NULL || chan->common.func == NULL){
1724 ++card->wandev.stats.rx_dropped;
1725 dev_kfree_skb_any(skb);
1726 chdlc_work_cleanup(dev);
1727 continue;
1728 }
1729
1730 if (chan->common.func(skb,dev,chan->common.sk) != 0){
1731				/* Sock is full, cannot send; queue us for another
1732 * try */
1733 atomic_set(&chan->common.receive_block,1);
1734 return;
1735 }else{
1736 chdlc_work_cleanup(dev);
1737 }
1738 }else{
1739 chdlc_work_cleanup(dev);
1740 }
1741 }
1742 clear_bit(0, &chan->tq_working);
1743
1744 return;
1745}
1746
1747static int chdlc_work_cleanup(struct net_device *dev)
1748{
1749 chdlc_private_area_t* chan = dev->priv;
1750
1751 ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb = NULL;
1752
1753 if (chan->bh_read == MAX_BH_BUFF){
1754 chan->bh_read=0;
1755 }else{
1756 ++chan->bh_read;
1757 }
1758
1759 atomic_dec(&chan->bh_buff_used);
1760 return 0;
1761}
1762
1763
1764
1765static int bh_enqueue(struct net_device *dev, struct sk_buff *skb)
1766{
1767 /* Check for full */
1768 chdlc_private_area_t* chan = dev->priv;
1769 sdla_t *card = chan->card;
1770
1771 if (atomic_read(&chan->bh_buff_used) == (MAX_BH_BUFF+1)){
1772 ++card->wandev.stats.rx_dropped;
1773 dev_kfree_skb_any(skb);
1774 return 1;
1775 }
1776
1777 ((bh_data_t *)&chan->bh_head[chan->bh_write])->skb = skb;
1778
1779 if (chan->bh_write == MAX_BH_BUFF){
1780 chan->bh_write=0;
1781 }else{
1782 ++chan->bh_write;
1783 }
1784
1785 atomic_inc(&chan->bh_buff_used);
1786
1787 return 0;
1788}
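
/*
 * Note added for clarity (describes the BH ring used by bh_enqueue(),
 * chdlc_work() and chdlc_work_cleanup() above): the ring holds
 * MAX_BH_BUFF + 1 elements, indices run 0..MAX_BH_BUFF and wrap back to
 * zero, and occupancy is tracked separately in the atomic bh_buff_used
 * counter instead of by comparing the read and write indices.  The ring
 * is full when bh_buff_used reaches MAX_BH_BUFF + 1 and empty when it
 * drops to zero.
 */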
1789
1790/* END OF API BH Support */
1791
1792
1793/****** Interrupt Handlers **************************************************/
1794
1795/*============================================================================
1796 * Cisco HDLC interrupt service routine.
1797 */
1798static void wpc_isr (sdla_t* card)
1799{
1800 struct net_device* dev;
1801 SHARED_MEMORY_INFO_STRUCT* flags = NULL;
1802 int i;
1803 sdla_t *my_card;
1804
1805
1806 /* Check for which port the interrupt has been generated
1807 * Since Secondary Port is piggybacking on the Primary
1808 * the check must be done here.
1809 */
1810
1811 flags = card->u.c.flags;
1812 if (!flags->interrupt_info_struct.interrupt_type){
1813 /* Check for a second port (piggybacking) */
1814 if ((my_card = card->next)){
1815 flags = my_card->u.c.flags;
1816 if (flags->interrupt_info_struct.interrupt_type){
1817 card = my_card;
1818 card->isr(card);
1819 return;
1820 }
1821 }
1822 }
1823
1824 flags = card->u.c.flags;
1825 card->in_isr = 1;
1826 dev = card->wandev.dev;
1827
1828 /* If we get an interrupt with no network device, stop the interrupts
1829 * and issue an error */
1830 if (!card->tty_opt && !dev &&
1831 flags->interrupt_info_struct.interrupt_type !=
1832 COMMAND_COMPLETE_APP_INT_PEND){
1833
1834 goto isr_done;
1835 }
1836
1837 /* if critical due to peripheral operations
1838 * ie. update() or getstats() then reset the interrupt and
1839 * wait for the board to retrigger.
1840 */
1841 if(test_bit(PERI_CRIT, (void*)&card->wandev.critical)) {
1842 printk(KERN_INFO "ISR CRIT TO PERI\n");
1843 goto isr_done;
1844 }
1845
1846 /* On a 508 Card, if critical due to if_send
1847 * Major Error !!! */
1848 if(card->hw.type != SDLA_S514) {
1849 if(test_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
1850 printk(KERN_INFO "%s: Critical while in ISR: %lx\n",
1851 card->devname, card->wandev.critical);
1852 card->in_isr = 0;
1853 flags->interrupt_info_struct.interrupt_type = 0;
1854 return;
1855 }
1856 }
1857
1858 switch(flags->interrupt_info_struct.interrupt_type) {
1859
1860 case RX_APP_INT_PEND: /* 0x01: receive interrupt */
1861 rx_intr(card);
1862 break;
1863
1864 case TX_APP_INT_PEND: /* 0x02: transmit interrupt */
1865 flags->interrupt_info_struct.interrupt_permission &=
1866 ~APP_INT_ON_TX_FRAME;
1867
1868 if (card->tty_opt){
1869 wanpipe_tty_trigger_poll(card);
1870 break;
1871 }
1872
1873 if (dev && netif_queue_stopped(dev)){
1874 if (card->u.c.usedby == API){
1875 netif_start_queue(dev);
1876 wakeup_sk_bh(dev);
1877 }else{
1878 netif_wake_queue(dev);
1879 }
1880 }
1881 break;
1882
1883 case COMMAND_COMPLETE_APP_INT_PEND:/* 0x04: cmd cplt */
1884 ++ Intr_test_counter;
1885 break;
1886
1887 case CHDLC_EXCEP_COND_APP_INT_PEND: /* 0x20 */
1888 process_chdlc_exception(card);
1889 break;
1890
1891 case GLOBAL_EXCEP_COND_APP_INT_PEND:
1892 process_global_exception(card);
1893 break;
1894
1895 case TIMER_APP_INT_PEND:
1896 timer_intr(card);
1897 break;
1898
1899 default:
1900 printk(KERN_INFO "%s: spurious interrupt 0x%02X!\n",
1901 card->devname,
1902 flags->interrupt_info_struct.interrupt_type);
1903 printk(KERN_INFO "Code name: ");
1904 for(i = 0; i < 4; i ++)
1905 printk(KERN_INFO "%c",
1906 flags->global_info_struct.codename[i]);
1907 printk(KERN_INFO "\nCode version: ");
1908 for(i = 0; i < 4; i ++)
1909 printk(KERN_INFO "%c",
1910 flags->global_info_struct.codeversion[i]);
1911 printk(KERN_INFO "\n");
1912 break;
1913 }
1914
1915isr_done:
1916
1917 card->in_isr = 0;
1918 flags->interrupt_info_struct.interrupt_type = 0;
1919 return;
1920}
1921
1922/*============================================================================
1923 * Receive interrupt handler.
1924 */
1925static void rx_intr (sdla_t* card)
1926{
1927 struct net_device *dev;
1928 chdlc_private_area_t *chdlc_priv_area;
1929 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
1930 CHDLC_DATA_RX_STATUS_EL_STRUCT *rxbuf = card->u.c.rxmb;
1931 struct sk_buff *skb;
1932 unsigned len;
1933 unsigned addr = rxbuf->ptr_data_bfr;
1934 void *buf;
1935 int i,udp_type;
1936
1937 if (rxbuf->opp_flag != 0x01) {
1938 printk(KERN_INFO
1939 "%s: corrupted Rx buffer @ 0x%X, flag = 0x%02X!\n",
1940 card->devname, (unsigned)rxbuf, rxbuf->opp_flag);
1941 printk(KERN_INFO "Code name: ");
1942 for(i = 0; i < 4; i ++)
1943 printk(KERN_INFO "%c",
1944 flags->global_info_struct.codename[i]);
1945 printk(KERN_INFO "\nCode version: ");
1946 for(i = 0; i < 4; i ++)
1947 printk(KERN_INFO "%c",
1948 flags->global_info_struct.codeversion[i]);
1949 printk(KERN_INFO "\n");
1950
1951
1952 /* Bug Fix: Mar 6 2000
1953		 * If we get a corrupted mailbox, it means that the driver
1954 * is out of sync with the firmware. There is no recovery.
1955 * If we don't turn off all interrupts for this card
1956 * the machine will crash.
1957 */
1958 printk(KERN_INFO "%s: Critical router failure ...!!!\n", card->devname);
1959 printk(KERN_INFO "Please contact Sangoma Technologies !\n");
1960 chdlc_set_intr_mode(card,0);
1961 return;
1962 }
1963
1964 len = rxbuf->frame_length;
1965
1966 if (card->tty_opt){
1967
1968 if (rxbuf->error_flag){
1969 goto rx_exit;
1970 }
1971
1972 if (len <= CRC_LENGTH){
1973 goto rx_exit;
1974 }
1975
1976 if (!card->u.c.async_mode){
1977 len -= CRC_LENGTH;
1978 }
1979
1980 wanpipe_tty_receive(card,addr,len);
1981 goto rx_exit;
1982 }
1983
1984 dev = card->wandev.dev;
1985
1986 if (!dev){
1987 goto rx_exit;
1988 }
1989
1990 if (!netif_running(dev))
1991 goto rx_exit;
1992
1993 chdlc_priv_area = dev->priv;
1994
1995
1996 /* Allocate socket buffer */
1997 skb = dev_alloc_skb(len);
1998
1999 if (skb == NULL) {
2000 printk(KERN_INFO "%s: no socket buffers available!\n",
2001 card->devname);
2002 ++card->wandev.stats.rx_dropped;
2003 goto rx_exit;
2004 }
2005
2006 /* Copy data to the socket buffer */
2007 if((addr + len) > card->u.c.rx_top + 1) {
2008 unsigned tmp = card->u.c.rx_top - addr + 1;
2009 buf = skb_put(skb, tmp);
2010 sdla_peek(&card->hw, addr, buf, tmp);
2011 addr = card->u.c.rx_base;
2012 len -= tmp;
2013 }
2014
2015 buf = skb_put(skb, len);
2016 sdla_peek(&card->hw, addr, buf, len);
2017
2018 skb->protocol = htons(ETH_P_IP);
2019
2020 card->wandev.stats.rx_packets ++;
2021 card->wandev.stats.rx_bytes += skb->len;
2022 udp_type = udp_pkt_type( skb, card );
2023
2024 if(udp_type == UDP_CPIPE_TYPE) {
2025 if(store_udp_mgmt_pkt(UDP_PKT_FRM_NETWORK,
2026 card, skb, dev, chdlc_priv_area)) {
2027 flags->interrupt_info_struct.
2028 interrupt_permission |=
2029 APP_INT_ON_TIMER;
2030 }
2031 } else if(card->u.c.usedby == API) {
2032
2033 api_rx_hdr_t* api_rx_hdr;
2034 skb_push(skb, sizeof(api_rx_hdr_t));
2035 api_rx_hdr = (api_rx_hdr_t*)&skb->data[0x00];
2036 api_rx_hdr->error_flag = rxbuf->error_flag;
2037 api_rx_hdr->time_stamp = rxbuf->time_stamp;
2038
2039 skb->protocol = htons(PVC_PROT);
2040 skb->mac.raw = skb->data;
2041 skb->dev = dev;
2042 skb->pkt_type = WAN_PACKET_DATA;
2043
2044 bh_enqueue(dev, skb);
2045
2046 if (!test_and_set_bit(0,&chdlc_priv_area->tq_working))
2047 wanpipe_queue_work(&chdlc_priv_area->common.wanpipe_work);
2048 }else{
2049 /* FIXME: we should check to see if the received packet is a
2050 multicast packet so that we can increment the multicast
2051 statistic
2052 ++ chdlc_priv_area->if_stats.multicast;
2053 */
2054 /* Pass it up the protocol stack */
2055
2056 skb->dev = dev;
2057 skb->mac.raw = skb->data;
2058 netif_rx(skb);
2059 dev->last_rx = jiffies;
2060 }
2061
2062rx_exit:
2063 /* Release buffer element and calculate a pointer to the next one */
2064 rxbuf->opp_flag = 0x00;
2065 card->u.c.rxmb = ++ rxbuf;
2066 if((void*)rxbuf > card->u.c.rxbuf_last){
2067 card->u.c.rxmb = card->u.c.rxbuf_base;
2068 }
2069}
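
/*
 * Note added for clarity (describes the copy logic in rx_intr() above):
 * the on-card receive data area is circular, bounded by rx_base and
 * rx_top.  When a frame wraps past rx_top, the copy into the socket
 * buffer is split in two: first the bytes from addr up to rx_top, then
 * the remainder starting again at rx_base.
 */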
2070
2071/*============================================================================
2072 * Timer interrupt handler.
2073 * The timer interrupt is used for two purposes:
2074 * 1) Processing udp calls from 'cpipemon'.
2075 * 2) Reading board-level statistics for updating the proc file system.
2076 */
2077void timer_intr(sdla_t *card)
2078{
2079 struct net_device* dev;
2080 chdlc_private_area_t* chdlc_priv_area = NULL;
2081 SHARED_MEMORY_INFO_STRUCT* flags = NULL;
2082
2083 if ((dev = card->wandev.dev)==NULL){
2084 flags = card->u.c.flags;
2085 flags->interrupt_info_struct.interrupt_permission &=
2086 ~APP_INT_ON_TIMER;
2087 return;
2088 }
2089
2090 chdlc_priv_area = dev->priv;
2091
2092 if (chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_CONFIG) {
2093 if (!config_chdlc(card)){
2094 chdlc_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_CONFIG;
2095 }
2096 }
2097
2098 /* process a udp call if pending */
2099 if(chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_UDP) {
2100 process_udp_mgmt_pkt(card, dev,
2101 chdlc_priv_area);
2102 chdlc_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_UDP;
2103 }
2104
2105 /* read the communications statistics if required */
2106 if(chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_UPDATE) {
2107 update_comms_stats(card, chdlc_priv_area);
2108 if(!(-- chdlc_priv_area->update_comms_stats)) {
2109 chdlc_priv_area->timer_int_enabled &=
2110 ~TMR_INT_ENABLED_UPDATE;
2111 }
2112 }
2113
2114 /* only disable the timer interrupt if there are no udp or statistic */
2115 /* updates pending */
2116 if(!chdlc_priv_area->timer_int_enabled) {
2117 flags = card->u.c.flags;
2118 flags->interrupt_info_struct.interrupt_permission &=
2119 ~APP_INT_ON_TIMER;
2120 }
2121}
2122
2123/*------------------------------------------------------------------------------
2124 Miscellaneous Functions
2125 - set_chdlc_config() used to set configuration options on the board
2126------------------------------------------------------------------------------*/
2127
2128static int set_chdlc_config(sdla_t* card)
2129{
2130 CHDLC_CONFIGURATION_STRUCT cfg;
2131
2132 memset(&cfg, 0, sizeof(CHDLC_CONFIGURATION_STRUCT));
2133
2134 if(card->wandev.clocking){
2135 cfg.baud_rate = card->wandev.bps;
2136 }
2137
2138 cfg.line_config_options = (card->wandev.interface == WANOPT_RS232) ?
2139 INTERFACE_LEVEL_RS232 : INTERFACE_LEVEL_V35;
2140
2141 cfg.modem_config_options = 0;
2142 cfg.modem_status_timer = 100;
2143
2144 cfg.CHDLC_protocol_options = card->u.c.protocol_options;
2145
2146 if (card->tty_opt){
2147 cfg.CHDLC_API_options = DISCARD_RX_ERROR_FRAMES;
2148 }
2149
2150 cfg.percent_data_buffer_for_Tx = (card->u.c.receive_only) ? 0 : 50;
2151 cfg.CHDLC_statistics_options = (CHDLC_TX_DATA_BYTE_COUNT_STAT |
2152 CHDLC_RX_DATA_BYTE_COUNT_STAT);
2153
2154 if (card->tty_opt){
2155 card->wandev.mtu = TTY_CHDLC_MAX_MTU;
2156 }
2157 cfg.max_CHDLC_data_field_length = card->wandev.mtu;
2158 cfg.transmit_keepalive_timer = card->u.c.kpalv_tx;
2159 cfg.receive_keepalive_timer = card->u.c.kpalv_rx;
2160 cfg.keepalive_error_tolerance = card->u.c.kpalv_err;
2161 cfg.SLARP_request_timer = card->u.c.slarp_timer;
2162
2163 if (cfg.SLARP_request_timer) {
2164 cfg.IP_address = 0;
2165 cfg.IP_netmask = 0;
2166
2167 }else if (card->wandev.dev){
2168 struct net_device *dev = card->wandev.dev;
2169 chdlc_private_area_t *chdlc_priv_area = dev->priv;
2170
2171 struct in_device *in_dev = dev->ip_ptr;
2172
2173 if(in_dev != NULL) {
2174 struct in_ifaddr *ifa = in_dev->ifa_list;
2175
2176 if (ifa != NULL ) {
2177 cfg.IP_address = ntohl(ifa->ifa_local);
2178 cfg.IP_netmask = ntohl(ifa->ifa_mask);
2179 chdlc_priv_area->IP_address = ntohl(ifa->ifa_local);
2180 chdlc_priv_area->IP_netmask = ntohl(ifa->ifa_mask);
2181 }
2182 }
2183
2184 /* FIXME: We must re-think this message in next release
2185 if((cfg.IP_address & 0x000000FF) > 2) {
2186 printk(KERN_WARNING "\n");
2187 printk(KERN_WARNING " WARNING:%s configured with an\n",
2188 card->devname);
2189 printk(KERN_WARNING " invalid local IP address.\n");
2190 printk(KERN_WARNING " Slarp pragmatics will fail.\n");
2191 printk(KERN_WARNING " IP address should be of the\n");
2192 printk(KERN_WARNING " format A.B.C.1 or A.B.C.2.\n");
2193 }
2194 */
2195 }
2196
2197 return chdlc_configure(card, &cfg);
2198}
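
/*
 * Note added for clarity (describes set_chdlc_config() above): when a
 * SLARP request timer is configured the local IP address and netmask are
 * left at zero so the firmware can discover them through SLARP; when
 * SLARP is disabled the address and mask of the attached network device,
 * if any, are copied into the configuration (and cached in the private
 * area) instead.
 */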
2199
2200
2201/*-----------------------------------------------------------------------------
2202 set_asy_config() used to set asynchronous configuration options on the board
2203------------------------------------------------------------------------------*/
2204
2205static int set_asy_config(sdla_t* card)
2206{
2207
2208 ASY_CONFIGURATION_STRUCT cfg;
2209 CHDLC_MAILBOX_STRUCT *mailbox = card->mbox;
2210 int err;
2211
2212 memset(&cfg, 0, sizeof(ASY_CONFIGURATION_STRUCT));
2213
2214 if(card->wandev.clocking)
2215 cfg.baud_rate = card->wandev.bps;
2216
2217 cfg.line_config_options = (card->wandev.interface == WANOPT_RS232) ?
2218 INTERFACE_LEVEL_RS232 : INTERFACE_LEVEL_V35;
2219
2220 cfg.modem_config_options = 0;
2221 cfg.asy_API_options = card->u.c.api_options;
2222 cfg.asy_protocol_options = card->u.c.protocol_options;
2223 cfg.Tx_bits_per_char = card->u.c.tx_bits_per_char;
2224 cfg.Rx_bits_per_char = card->u.c.rx_bits_per_char;
2225 cfg.stop_bits = card->u.c.stop_bits;
2226 cfg.parity = card->u.c.parity;
2227 cfg.break_timer = card->u.c.break_timer;
2228 cfg.asy_Rx_inter_char_timer = card->u.c.inter_char_timer;
2229 cfg.asy_Rx_complete_length = card->u.c.rx_complete_length;
2230 cfg.XON_char = card->u.c.xon_char;
2231 cfg.XOFF_char = card->u.c.xoff_char;
2232 cfg.asy_statistics_options = (CHDLC_TX_DATA_BYTE_COUNT_STAT |
2233 CHDLC_RX_DATA_BYTE_COUNT_STAT);
2234
2235 mailbox->buffer_length = sizeof(ASY_CONFIGURATION_STRUCT);
2236 memcpy(mailbox->data, &cfg, mailbox->buffer_length);
2237 mailbox->command = SET_ASY_CONFIGURATION;
2238 err = sdla_exec(mailbox) ? mailbox->return_code : CMD_TIMEOUT;
2239 if (err != COMMAND_OK)
2240 chdlc_error (card, err, mailbox);
2241 return err;
2242}
2243
2244/*============================================================================
2245 * Enable asynchronous communications.
2246 */
2247
2248static int asy_comm_enable (sdla_t* card)
2249{
2250
2251 int err;
2252 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
2253
2254 mb->buffer_length = 0;
2255 mb->command = ENABLE_ASY_COMMUNICATIONS;
2256 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2257 if (err != COMMAND_OK && card->wandev.dev)
2258 chdlc_error(card, err, mb);
2259
2260 if (!err)
2261 card->u.c.comm_enabled = 1;
2262
2263 return err;
2264}
2265
2266/*============================================================================
2267 * Process global exception condition
2268 */
2269static int process_global_exception(sdla_t *card)
2270{
2271 CHDLC_MAILBOX_STRUCT* mbox = card->mbox;
2272 int err;
2273
2274 mbox->buffer_length = 0;
2275 mbox->command = READ_GLOBAL_EXCEPTION_CONDITION;
2276 err = sdla_exec(mbox) ? mbox->return_code : CMD_TIMEOUT;
2277
2278 if(err != CMD_TIMEOUT ){
2279
2280 switch(mbox->return_code) {
2281
2282 case EXCEP_MODEM_STATUS_CHANGE:
2283
2284 printk(KERN_INFO "%s: Modem status change\n",
2285 card->devname);
2286
2287 switch(mbox->data[0] & (DCD_HIGH | CTS_HIGH)) {
2288 case (DCD_HIGH):
2289 printk(KERN_INFO "%s: DCD high, CTS low\n",card->devname);
2290 break;
2291 case (CTS_HIGH):
2292 printk(KERN_INFO "%s: DCD low, CTS high\n",card->devname);
2293 break;
2294 case ((DCD_HIGH | CTS_HIGH)):
2295 printk(KERN_INFO "%s: DCD high, CTS high\n",card->devname);
2296 break;
2297 default:
2298 printk(KERN_INFO "%s: DCD low, CTS low\n",card->devname);
2299 break;
2300 }
2301 break;
2302
2303 case EXCEP_TRC_DISABLED:
2304 printk(KERN_INFO "%s: Line trace disabled\n",
2305 card->devname);
2306 break;
2307
2308 case EXCEP_IRQ_TIMEOUT:
2309 printk(KERN_INFO "%s: IRQ timeout occurred\n",
2310 card->devname);
2311 break;
2312
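			/* 0x17: modem hang-up exception code reported by the
			 * firmware; no symbolic constant is defined for it here. */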
2313 case 0x17:
2314 if (card->tty_opt){
2315 if (card->tty && card->tty_open){
2316 printk(KERN_INFO
2317 "%s: Modem Hangup Exception: Hanging Up!\n",
2318 card->devname);
2319 tty_hangup(card->tty);
2320 }
2321 break;
2322 }
2323
 2324 				/* If TTY is not used, just fall through to the default case */
2325
2326 default:
2327 printk(KERN_INFO "%s: Global exception %x\n",
2328 card->devname, mbox->return_code);
2329 break;
2330 }
2331 }
2332 return 0;
2333}
2334
2335
2336/*============================================================================
2337 * Process chdlc exception condition
2338 */
2339static int process_chdlc_exception(sdla_t *card)
2340{
2341 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
2342 int err;
2343
2344 mb->buffer_length = 0;
2345 mb->command = READ_CHDLC_EXCEPTION_CONDITION;
2346 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2347 if(err != CMD_TIMEOUT) {
2348
2349 switch (err) {
2350
2351 case EXCEP_LINK_ACTIVE:
2352 port_set_state(card, WAN_CONNECTED);
2353 trigger_chdlc_poll(card->wandev.dev);
2354 break;
2355
2356 case EXCEP_LINK_INACTIVE_MODEM:
2357 port_set_state(card, WAN_DISCONNECTED);
2358 unconfigure_ip(card);
2359 trigger_chdlc_poll(card->wandev.dev);
2360 break;
2361
2362 case EXCEP_LINK_INACTIVE_KPALV:
2363 port_set_state(card, WAN_DISCONNECTED);
2364 printk(KERN_INFO "%s: Keepalive timer expired.\n",
2365 card->devname);
2366 unconfigure_ip(card);
2367 trigger_chdlc_poll(card->wandev.dev);
2368 break;
2369
2370 case EXCEP_IP_ADDRESS_DISCOVERED:
2371 if (configure_ip(card))
2372 return -1;
2373 break;
2374
2375 case EXCEP_LOOPBACK_CONDITION:
2376 printk(KERN_INFO "%s: Loopback Condition Detected.\n",
2377 card->devname);
2378 break;
2379
2380 case NO_CHDLC_EXCEP_COND_TO_REPORT:
2381 printk(KERN_INFO "%s: No exceptions reported.\n",
2382 card->devname);
2383 break;
2384 }
2385
2386 }
2387 return 0;
2388}
2389
2390
2391/*============================================================================
2392 * Configure IP from SLARP negotiation
2393 * This adds dynamic routes when SLARP has provided valid addresses
2394 */
2395
2396static int configure_ip (sdla_t* card)
2397{
2398 struct net_device *dev = card->wandev.dev;
2399 chdlc_private_area_t *chdlc_priv_area;
2400 char err;
2401
2402 if (!dev)
2403 return 0;
2404
2405 chdlc_priv_area = dev->priv;
2406
2407
2408 /* set to discover */
2409 if(card->u.c.slarp_timer != 0x00) {
2410 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
2411 CHDLC_CONFIGURATION_STRUCT *cfg;
2412
2413 mb->buffer_length = 0;
2414 mb->command = READ_CHDLC_CONFIGURATION;
2415 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2416
2417 if(err != COMMAND_OK) {
2418 chdlc_error(card,err,mb);
2419 return -1;
2420 }
2421
2422 cfg = (CHDLC_CONFIGURATION_STRUCT *)mb->data;
2423 chdlc_priv_area->IP_address = cfg->IP_address;
2424 chdlc_priv_area->IP_netmask = cfg->IP_netmask;
2425
2426 /* Set flag to add route */
2427 chdlc_priv_area->route_status = ADD_ROUTE;
2428
2429 /* The idea here is to add the route in the poll routine.
2430 This way, we aren't in interrupt context when adding routes */
2431 trigger_chdlc_poll(dev);
2432 }
2433
2434 return 0;
2435}
2436
2437
2438/*============================================================================
2439 * Un-Configure IP negotiated by SLARP
2440 * This removes dynamic routes when the link becomes inactive.
2441 */
2442
2443static int unconfigure_ip (sdla_t* card)
2444{
2445 struct net_device *dev = card->wandev.dev;
2446 chdlc_private_area_t *chdlc_priv_area;
2447
2448 if (!dev)
2449 return 0;
2450
2451 chdlc_priv_area= dev->priv;
2452
2453 if (chdlc_priv_area->route_status == ROUTE_ADDED) {
2454
 2455 			/* Note: If this function is called, the
 2456 			 * port state has been set to DISCONNECTED. That state
 2457 			 * change triggers the polling routine, which
 2458 			 * checks for this condition.
 2459 			 */
2460 chdlc_priv_area->route_status = REMOVE_ROUTE;
2461
2462 }
2463 return 0;
2464}
2465
2466/*============================================================================
2467 * Routine to add/remove routes
2468 * Called like a polling routine when Routes are flagged to be added/removed.
2469 */
2470
2471static void process_route (sdla_t *card)
2472{
2473 struct net_device *dev = card->wandev.dev;
2474 unsigned char port_num;
2475 chdlc_private_area_t *chdlc_priv_area = NULL;
2476 u32 local_IP_addr = 0;
2477 u32 remote_IP_addr = 0;
2478 u32 IP_netmask, IP_addr;
2479 int err = 0;
2480 struct in_device *in_dev;
2481 mm_segment_t fs;
2482 struct ifreq if_info;
2483 struct sockaddr_in *if_data1, *if_data2;
2484
2485 chdlc_priv_area = dev->priv;
2486 port_num = card->u.c.comm_port;
2487
 2488 	/* Bug Fix Mar 16 2000
 2489 	 * AND the IP address with the netmask before checking
 2490 	 * the last two bits. */
2491
2492 if((chdlc_priv_area->route_status == ADD_ROUTE) &&
2493 ((chdlc_priv_area->IP_address & ~chdlc_priv_area->IP_netmask) > 2)) {
2494
2495 printk(KERN_INFO "%s: Dynamic route failure.\n",card->devname);
2496
2497 if(card->u.c.slarp_timer) {
2498 u32 addr_net = htonl(chdlc_priv_area->IP_address);
2499
2500 printk(KERN_INFO "%s: Bad IP address %u.%u.%u.%u received\n",
2501 card->devname,
2502 NIPQUAD(addr_net));
2503 printk(KERN_INFO "%s: from remote station.\n",
2504 card->devname);
2505
2506 }else{
2507 u32 addr_net = htonl(chdlc_priv_area->IP_address);
2508
2509 printk(KERN_INFO "%s: Bad IP address %u.%u.%u.%u issued\n",
2510 card->devname,
2511 NIPQUAD(addr_net));
2512 printk(KERN_INFO "%s: to remote station. Local\n",
2513 card->devname);
2514 printk(KERN_INFO "%s: IP address must be A.B.C.1\n",
2515 card->devname);
2516 printk(KERN_INFO "%s: or A.B.C.2.\n",card->devname);
2517 }
2518
2519 /* remove the route due to the IP address error condition */
2520 chdlc_priv_area->route_status = REMOVE_ROUTE;
2521 err = 1;
2522 }
2523
2524 /* If we are removing a route with bad IP addressing, then use the */
2525 /* locally configured IP addresses */
2526 if((chdlc_priv_area->route_status == REMOVE_ROUTE) && err) {
2527
2528 /* do not remove a bad route that has already been removed */
2529 if(chdlc_priv_area->route_removed) {
2530 return;
2531 }
2532
2533 in_dev = dev->ip_ptr;
2534
2535 if(in_dev != NULL) {
2536 struct in_ifaddr *ifa = in_dev->ifa_list;
2537 if (ifa != NULL ) {
2538 local_IP_addr = ifa->ifa_local;
2539 IP_netmask = ifa->ifa_mask;
2540 }
2541 }
2542 }else{
2543 /* According to Cisco HDLC, if the point-to-point address is
2544 A.B.C.1, then we are the opposite (A.B.C.2), and vice-versa.
2545 */
2546 IP_netmask = ntohl(chdlc_priv_area->IP_netmask);
2547 remote_IP_addr = ntohl(chdlc_priv_area->IP_address);
2548
2549
2550 /* If Netmask is 255.255.255.255 the local address
2551 * calculation will fail. Default it back to 255.255.255.0 */
2552 if (IP_netmask == 0xffffffff)
2553 IP_netmask &= 0x00ffffff;
2554
2555 /* Bug Fix Mar 16 2000
2556 * AND the Remote IP address with IP netmask, instead
2557 * of static netmask of 255.255.255.0 */
2558 local_IP_addr = (remote_IP_addr & IP_netmask) +
2559 (~remote_IP_addr & ntohl(0x0003));
2560
2561 if(!card->u.c.slarp_timer) {
2562 IP_addr = local_IP_addr;
2563 local_IP_addr = remote_IP_addr;
2564 remote_IP_addr = IP_addr;
2565 }
2566 }
2567
 2568 	fs = get_fs();                  /* Save the current address limit */
 2569 	set_fs(get_ds());               /* Allow kernel-space buffers in the ioctl calls below */
2570
2571 /* Setup a structure for adding/removing routes */
2572 memset(&if_info, 0, sizeof(if_info));
2573 strcpy(if_info.ifr_name, dev->name);
2574
2575 switch (chdlc_priv_area->route_status) {
2576
2577 case ADD_ROUTE:
2578
2579 if(!card->u.c.slarp_timer) {
2580 if_data2 = (struct sockaddr_in *)&if_info.ifr_dstaddr;
2581 if_data2->sin_addr.s_addr = remote_IP_addr;
2582 if_data2->sin_family = AF_INET;
2583 err = devinet_ioctl(SIOCSIFDSTADDR, &if_info);
2584 } else {
2585 if_data1 = (struct sockaddr_in *)&if_info.ifr_addr;
2586 if_data1->sin_addr.s_addr = local_IP_addr;
2587 if_data1->sin_family = AF_INET;
2588 if(!(err = devinet_ioctl(SIOCSIFADDR, &if_info))){
2589 if_data2 = (struct sockaddr_in *)&if_info.ifr_dstaddr;
2590 if_data2->sin_addr.s_addr = remote_IP_addr;
2591 if_data2->sin_family = AF_INET;
2592 err = devinet_ioctl(SIOCSIFDSTADDR, &if_info);
2593 }
2594 }
2595
2596 if(err) {
2597 printk(KERN_INFO "%s: Add route %u.%u.%u.%u failed (%d)\n",
2598 card->devname, NIPQUAD(remote_IP_addr), err);
2599 } else {
2600 ((chdlc_private_area_t *)dev->priv)->route_status = ROUTE_ADDED;
2601 printk(KERN_INFO "%s: Dynamic route added.\n",
2602 card->devname);
2603 printk(KERN_INFO "%s: Local IP addr : %u.%u.%u.%u\n",
2604 card->devname, NIPQUAD(local_IP_addr));
2605 printk(KERN_INFO "%s: Remote IP addr: %u.%u.%u.%u\n",
2606 card->devname, NIPQUAD(remote_IP_addr));
2607 chdlc_priv_area->route_removed = 0;
2608 }
2609 break;
2610
2611
2612 case REMOVE_ROUTE:
2613
2614 /* Change the local ip address of the interface to 0.
2615 * This will also delete the destination route.
2616 */
2617 if(!card->u.c.slarp_timer) {
2618 if_data2 = (struct sockaddr_in *)&if_info.ifr_dstaddr;
2619 if_data2->sin_addr.s_addr = 0;
2620 if_data2->sin_family = AF_INET;
2621 err = devinet_ioctl(SIOCSIFDSTADDR, &if_info);
2622 } else {
2623 if_data1 = (struct sockaddr_in *)&if_info.ifr_addr;
2624 if_data1->sin_addr.s_addr = 0;
2625 if_data1->sin_family = AF_INET;
2626 err = devinet_ioctl(SIOCSIFADDR,&if_info);
2627
2628 }
2629 if(err) {
2630 printk(KERN_INFO
2631 "%s: Remove route %u.%u.%u.%u failed, (err %d)\n",
2632 card->devname, NIPQUAD(remote_IP_addr),
2633 err);
2634 } else {
2635 ((chdlc_private_area_t *)dev->priv)->route_status =
2636 NO_ROUTE;
2637 printk(KERN_INFO "%s: Dynamic route removed: %u.%u.%u.%u\n",
2638 card->devname, NIPQUAD(local_IP_addr));
2639 chdlc_priv_area->route_removed = 1;
2640 }
2641 break;
2642 }
2643
 2644 	set_fs(fs);                     /* Restore the original address limit */
2645
2646}
2647
2648
2649/*=============================================================================
2650 * Store a UDP management packet for later processing.
2651 */
2652
2653static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
2654 struct sk_buff *skb, struct net_device* dev,
2655 chdlc_private_area_t* chdlc_priv_area)
2656{
2657 int udp_pkt_stored = 0;
2658
2659 if(!chdlc_priv_area->udp_pkt_lgth &&
2660 (skb->len <= MAX_LGTH_UDP_MGNT_PKT)) {
2661 chdlc_priv_area->udp_pkt_lgth = skb->len;
2662 chdlc_priv_area->udp_pkt_src = udp_pkt_src;
2663 memcpy(chdlc_priv_area->udp_pkt_data, skb->data, skb->len);
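		/* Flag the stored packet; it is presumably picked up and
		 * processed later from the timer interrupt handler. */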
2664 chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UDP;
2665 udp_pkt_stored = 1;
2666 }
2667
 2668 	/* Whether the packet came from the stack or the network, the
 2669 	 * original skb is no longer needed once it has been copied (or
 2670 	 * found to be too large), so free it in either case */
 2671 	dev_kfree_skb_any(skb);
2673
2674 return(udp_pkt_stored);
2675}
2676
2677
2678/*=============================================================================
2679 * Process UDP management packet.
2680 */
2681
2682static int process_udp_mgmt_pkt(sdla_t* card, struct net_device* dev,
2683 chdlc_private_area_t* chdlc_priv_area )
2684{
2685 unsigned char *buf;
2686 unsigned int frames, len;
2687 struct sk_buff *new_skb;
2688 unsigned short buffer_length, real_len;
2689 unsigned long data_ptr;
2690 unsigned data_length;
2691 int udp_mgmt_req_valid = 1;
2692 CHDLC_MAILBOX_STRUCT *mb = card->mbox;
2693 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
2694 chdlc_udp_pkt_t *chdlc_udp_pkt;
2695 struct timeval tv;
2696 int err;
2697 char ut_char;
2698
2699 chdlc_udp_pkt = (chdlc_udp_pkt_t *) chdlc_priv_area->udp_pkt_data;
2700
2701 if(chdlc_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK){
2702
 2703 		/* Only these commands are supported for remote debugging;
 2704 		 * all others are rejected. */
2705 switch(chdlc_udp_pkt->cblock.command) {
2706
2707 case READ_GLOBAL_STATISTICS:
2708 case READ_MODEM_STATUS:
2709 case READ_CHDLC_LINK_STATUS:
2710 case CPIPE_ROUTER_UP_TIME:
2711 case READ_COMMS_ERROR_STATS:
2712 case READ_CHDLC_OPERATIONAL_STATS:
2713
2714 /* These two commands are executed for
2715 * each request */
2716 case READ_CHDLC_CONFIGURATION:
2717 case READ_CHDLC_CODE_VERSION:
2718 udp_mgmt_req_valid = 1;
2719 break;
2720 default:
2721 udp_mgmt_req_valid = 0;
2722 break;
2723 }
2724 }
2725
2726 if(!udp_mgmt_req_valid) {
2727
2728 /* set length to 0 */
2729 chdlc_udp_pkt->cblock.buffer_length = 0;
2730
2731 /* set return code */
2732 chdlc_udp_pkt->cblock.return_code = 0xCD;
2733
2734 if (net_ratelimit()){
2735 printk(KERN_INFO
2736 "%s: Warning, Illegal UDP command attempted from network: %x\n",
2737 card->devname,chdlc_udp_pkt->cblock.command);
2738 }
2739
2740 } else {
2741 unsigned long trace_status_cfg_addr = 0;
2742 TRACE_STATUS_EL_CFG_STRUCT trace_cfg_struct;
2743 TRACE_STATUS_ELEMENT_STRUCT trace_element_struct;
2744
2745 switch(chdlc_udp_pkt->cblock.command) {
2746
2747 case CPIPE_ENABLE_TRACING:
2748 if (!chdlc_priv_area->TracingEnabled) {
2749
2750 /* OPERATE_DATALINE_MONITOR */
2751
2752 mb->buffer_length = sizeof(LINE_TRACE_CONFIG_STRUCT);
2753 mb->command = SET_TRACE_CONFIGURATION;
2754
2755 ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
2756 trace_config = TRACE_ACTIVE;
2757 /* Trace delay mode is not used because it slows
2758 down transfer and results in a standoff situation
2759 when there is a lot of data */
2760
2761 /* Configure the Trace based on user inputs */
2762 ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->trace_config |=
2763 chdlc_udp_pkt->data[0];
2764
2765 ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
2766 trace_deactivation_timer = 4000;
2767
2768
2769 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2770 if (err != COMMAND_OK) {
2771 chdlc_error(card,err,mb);
2772 card->TracingEnabled = 0;
2773 chdlc_udp_pkt->cblock.return_code = err;
2774 mb->buffer_length = 0;
2775 break;
2776 }
2777
2778 /* Get the base address of the trace element list */
2779 mb->buffer_length = 0;
2780 mb->command = READ_TRACE_CONFIGURATION;
2781 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2782
2783 if (err != COMMAND_OK) {
2784 chdlc_error(card,err,mb);
2785 chdlc_priv_area->TracingEnabled = 0;
2786 chdlc_udp_pkt->cblock.return_code = err;
2787 mb->buffer_length = 0;
2788 break;
2789 }
2790
2791 trace_status_cfg_addr =((LINE_TRACE_CONFIG_STRUCT *)
2792 mb->data) -> ptr_trace_stat_el_cfg_struct;
2793
2794 sdla_peek(&card->hw, trace_status_cfg_addr,
2795 &trace_cfg_struct, sizeof(trace_cfg_struct));
2796
2797 chdlc_priv_area->start_trace_addr = trace_cfg_struct.
2798 base_addr_trace_status_elements;
2799
2800 chdlc_priv_area->number_trace_elements =
2801 trace_cfg_struct.number_trace_status_elements;
2802
2803 chdlc_priv_area->end_trace_addr = (unsigned long)
2804 ((TRACE_STATUS_ELEMENT_STRUCT *)
2805 chdlc_priv_area->start_trace_addr +
2806 (chdlc_priv_area->number_trace_elements - 1));
2807
2808 chdlc_priv_area->base_addr_trace_buffer =
2809 trace_cfg_struct.base_addr_trace_buffer;
2810
2811 chdlc_priv_area->end_addr_trace_buffer =
2812 trace_cfg_struct.end_addr_trace_buffer;
2813
2814 chdlc_priv_area->curr_trace_addr =
2815 trace_cfg_struct.next_trace_element_to_use;
2816
2817 chdlc_priv_area->available_buffer_space = 2000 -
2818 sizeof(ip_pkt_t) -
2819 sizeof(udp_pkt_t) -
2820 sizeof(wp_mgmt_t) -
2821 sizeof(cblock_t) -
2822 sizeof(trace_info_t);
2823 }
2824 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
2825 mb->buffer_length = 0;
2826 chdlc_priv_area->TracingEnabled = 1;
2827 break;
2828
2829
2830 case CPIPE_DISABLE_TRACING:
2831 if (chdlc_priv_area->TracingEnabled) {
2832
2833 /* OPERATE_DATALINE_MONITOR */
2834 mb->buffer_length = sizeof(LINE_TRACE_CONFIG_STRUCT);
2835 mb->command = SET_TRACE_CONFIGURATION;
2836 ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
2837 trace_config = TRACE_INACTIVE;
2838 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2839 }
2840
2841 chdlc_priv_area->TracingEnabled = 0;
2842 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
2843 mb->buffer_length = 0;
2844 break;
2845
2846
2847 case CPIPE_GET_TRACE_INFO:
2848
2849 if (!chdlc_priv_area->TracingEnabled) {
2850 chdlc_udp_pkt->cblock.return_code = 1;
2851 mb->buffer_length = 0;
2852 break;
2853 }
2854
2855 chdlc_udp_pkt->trace_info.ismoredata = 0x00;
 2856 			buffer_length = 0;	/* bytes of the data area filled so far */
2857
2858 for (frames=0; frames < chdlc_priv_area->number_trace_elements; frames++){
2859
2860 trace_pkt_t *trace_pkt = (trace_pkt_t *)
2861 &chdlc_udp_pkt->data[buffer_length];
2862
2863 sdla_peek(&card->hw, chdlc_priv_area->curr_trace_addr,
2864 (unsigned char *)&trace_element_struct,
2865 sizeof(TRACE_STATUS_ELEMENT_STRUCT));
2866
2867 if (trace_element_struct.opp_flag == 0x00) {
2868 break;
2869 }
2870
2871 /* get pointer to real data */
2872 data_ptr = trace_element_struct.ptr_data_bfr;
2873
2874 /* See if there is actual data on the trace buffer */
2875 if (data_ptr){
2876 data_length = trace_element_struct.trace_length;
2877 }else{
2878 data_length = 0;
2879 chdlc_udp_pkt->trace_info.ismoredata = 0x01;
2880 }
2881
2882 if( (chdlc_priv_area->available_buffer_space - buffer_length)
2883 < ( sizeof(trace_pkt_t) + data_length) ) {
2884
2885 /* indicate there are more frames on board & exit */
2886 chdlc_udp_pkt->trace_info.ismoredata = 0x01;
2887 break;
2888 }
2889
2890 trace_pkt->status = trace_element_struct.trace_type;
2891
2892 trace_pkt->time_stamp =
2893 trace_element_struct.trace_time_stamp;
2894
2895 trace_pkt->real_length =
2896 trace_element_struct.trace_length;
2897
2898 /* see if we can fit the frame into the user buffer */
2899 real_len = trace_pkt->real_length;
2900
2901 if (data_ptr == 0) {
2902 trace_pkt->data_avail = 0x00;
2903 } else {
2904 unsigned tmp = 0;
2905
2906 /* get the data from circular buffer
2907 must check for end of buffer */
2908 trace_pkt->data_avail = 0x01;
2909
2910 if ((data_ptr + real_len) >
2911 chdlc_priv_area->end_addr_trace_buffer + 1){
2912
2913 tmp = chdlc_priv_area->end_addr_trace_buffer - data_ptr + 1;
2914 sdla_peek(&card->hw, data_ptr,
2915 trace_pkt->data,tmp);
2916 data_ptr = chdlc_priv_area->base_addr_trace_buffer;
2917 }
2918
2919 sdla_peek(&card->hw, data_ptr,
2920 &trace_pkt->data[tmp], real_len - tmp);
2921 }
2922
2923 /* zero the opp flag to show we got the frame */
2924 ut_char = 0x00;
2925 sdla_poke(&card->hw, chdlc_priv_area->curr_trace_addr, &ut_char, 1);
2926
2927 /* now move onto the next frame */
2928 chdlc_priv_area->curr_trace_addr += sizeof(TRACE_STATUS_ELEMENT_STRUCT);
2929
2930 /* check if we went over the last address */
2931 if ( chdlc_priv_area->curr_trace_addr > chdlc_priv_area->end_trace_addr ) {
2932 chdlc_priv_area->curr_trace_addr = chdlc_priv_area->start_trace_addr;
2933 }
2934
2935 if(trace_pkt->data_avail == 0x01) {
2936 buffer_length += real_len - 1;
2937 }
2938
2939 /* for the header */
2940 buffer_length += sizeof(trace_pkt_t);
2941
2942 } /* For Loop */
2943
2944 if (frames == chdlc_priv_area->number_trace_elements){
2945 chdlc_udp_pkt->trace_info.ismoredata = 0x01;
2946 }
2947 chdlc_udp_pkt->trace_info.num_frames = frames;
2948
2949 mb->buffer_length = buffer_length;
2950 chdlc_udp_pkt->cblock.buffer_length = buffer_length;
2951
2952 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
2953
2954 break;
2955
2956
2957 case CPIPE_FT1_READ_STATUS:
2958 ((unsigned char *)chdlc_udp_pkt->data )[0] =
2959 flags->FT1_info_struct.parallel_port_A_input;
2960
2961 ((unsigned char *)chdlc_udp_pkt->data )[1] =
2962 flags->FT1_info_struct.parallel_port_B_input;
2963
2964 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
2965 chdlc_udp_pkt->cblock.buffer_length = 2;
2966 mb->buffer_length = 2;
2967 break;
2968
2969 case CPIPE_ROUTER_UP_TIME:
2970 do_gettimeofday( &tv );
2971 chdlc_priv_area->router_up_time = tv.tv_sec -
2972 chdlc_priv_area->router_start_time;
2973 *(unsigned long *)&chdlc_udp_pkt->data =
2974 chdlc_priv_area->router_up_time;
2975 mb->buffer_length = sizeof(unsigned long);
2976 chdlc_udp_pkt->cblock.buffer_length = sizeof(unsigned long);
2977 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
2978 break;
2979
2980 case FT1_MONITOR_STATUS_CTRL:
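			/* rCount reference-counts FT1 monitor users: only the
			 * first enable and the last disable fall through to the
			 * board command (via dflt_1); intermediate requests are
			 * simply acknowledged with COMMAND_OK. */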
2981 /* Enable FT1 MONITOR STATUS */
2982 if ((chdlc_udp_pkt->data[0] & ENABLE_READ_FT1_STATUS) ||
2983 (chdlc_udp_pkt->data[0] & ENABLE_READ_FT1_OP_STATS)) {
2984
2985 if( rCount++ != 0 ) {
2986 chdlc_udp_pkt->cblock.
2987 return_code = COMMAND_OK;
2988 mb->buffer_length = 1;
2989 break;
2990 }
2991 }
2992
2993 /* Disable FT1 MONITOR STATUS */
2994 if( chdlc_udp_pkt->data[0] == 0) {
2995
2996 if( --rCount != 0) {
2997 chdlc_udp_pkt->cblock.
2998 return_code = COMMAND_OK;
2999 mb->buffer_length = 1;
3000 break;
3001 }
3002 }
3003 goto dflt_1;
3004
3005 default:
3006dflt_1:
3007 /* it's a board command */
3008 mb->command = chdlc_udp_pkt->cblock.command;
3009 mb->buffer_length = chdlc_udp_pkt->cblock.buffer_length;
3010 if (mb->buffer_length) {
3011 memcpy(&mb->data, (unsigned char *) chdlc_udp_pkt->
3012 data, mb->buffer_length);
3013 }
3014 /* run the command on the board */
3015 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
3016 if (err != COMMAND_OK) {
3017 break;
3018 }
3019
3020 /* copy the result back to our buffer */
3021 memcpy(&chdlc_udp_pkt->cblock, mb, sizeof(cblock_t));
3022
3023 if (mb->buffer_length) {
3024 memcpy(&chdlc_udp_pkt->data, &mb->data,
3025 mb->buffer_length);
3026 }
3027
3028 } /* end of switch */
3029 } /* end of else */
3030
3031 /* Fill UDP TTL */
3032 chdlc_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
3033
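	/* reply_udp() is assumed to turn the stored request into a reply
	 * frame in place and return the total frame length to transmit. */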
3034 len = reply_udp(chdlc_priv_area->udp_pkt_data, mb->buffer_length);
3035
3036
3037 if(chdlc_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK){
3038
 3039 		/* Must check if we interrupted the if_send() routine. The
 3040 		 * tx buffers might be in use; if so, drop the packet. */
3041 if (!test_bit(SEND_CRIT,&card->wandev.critical)) {
3042
3043 if(!chdlc_send(card, chdlc_priv_area->udp_pkt_data, len)) {
3044 ++ card->wandev.stats.tx_packets;
3045 card->wandev.stats.tx_bytes += len;
3046 }
3047 }
3048 } else {
3049
3050 /* Pass it up the stack
3051 Allocate socket buffer */
3052 if ((new_skb = dev_alloc_skb(len)) != NULL) {
3053 /* copy data into new_skb */
3054
3055 buf = skb_put(new_skb, len);
3056 memcpy(buf, chdlc_priv_area->udp_pkt_data, len);
3057
3058 /* Decapsulate pkt and pass it up the protocol stack */
3059 new_skb->protocol = htons(ETH_P_IP);
3060 new_skb->dev = dev;
3061 new_skb->mac.raw = new_skb->data;
3062
3063 netif_rx(new_skb);
3064 dev->last_rx = jiffies;
3065 } else {
3066
3067 printk(KERN_INFO "%s: no socket buffers available!\n",
3068 card->devname);
3069 }
3070 }
3071
3072 chdlc_priv_area->udp_pkt_lgth = 0;
3073
3074 return 0;
3075}
3076
3077/*============================================================================
3078 * Initialize Receive and Transmit Buffers.
3079 */
3080
3081static void init_chdlc_tx_rx_buff( sdla_t* card)
3082{
3083 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
3084 CHDLC_TX_STATUS_EL_CFG_STRUCT *tx_config;
3085 CHDLC_RX_STATUS_EL_CFG_STRUCT *rx_config;
3086 char err;
3087
3088 mb->buffer_length = 0;
3089 mb->command = READ_CHDLC_CONFIGURATION;
3090 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
3091
3092 if(err != COMMAND_OK) {
3093 if (card->wandev.dev){
3094 chdlc_error(card,err,mb);
3095 }
3096 return;
3097 }
3098
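	/* On S514 cards the firmware pointers are used directly as offsets
	 * from dpmbase; on S508 cards only a window of the dual-port memory
	 * is visible, so each offset is reduced modulo SDLA_WINDOWSIZE. */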
3099 if(card->hw.type == SDLA_S514) {
3100 tx_config = (CHDLC_TX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
3101 (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
3102 ptr_CHDLC_Tx_stat_el_cfg_struct));
3103 rx_config = (CHDLC_RX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
3104 (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
3105 ptr_CHDLC_Rx_stat_el_cfg_struct));
3106
3107 /* Setup Head and Tails for buffers */
3108 card->u.c.txbuf_base = (void *)(card->hw.dpmbase +
3109 tx_config->base_addr_Tx_status_elements);
3110 card->u.c.txbuf_last =
3111 (CHDLC_DATA_TX_STATUS_EL_STRUCT *)
3112 card->u.c.txbuf_base +
3113 (tx_config->number_Tx_status_elements - 1);
3114
3115 card->u.c.rxbuf_base = (void *)(card->hw.dpmbase +
3116 rx_config->base_addr_Rx_status_elements);
3117 card->u.c.rxbuf_last =
3118 (CHDLC_DATA_RX_STATUS_EL_STRUCT *)
3119 card->u.c.rxbuf_base +
3120 (rx_config->number_Rx_status_elements - 1);
3121
3122 /* Set up next pointer to be used */
3123 card->u.c.txbuf = (void *)(card->hw.dpmbase +
3124 tx_config->next_Tx_status_element_to_use);
3125 card->u.c.rxmb = (void *)(card->hw.dpmbase +
3126 rx_config->next_Rx_status_element_to_use);
3127 }
3128 else {
3129 tx_config = (CHDLC_TX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
3130 (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
3131 ptr_CHDLC_Tx_stat_el_cfg_struct % SDLA_WINDOWSIZE));
3132
3133 rx_config = (CHDLC_RX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
3134 (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
3135 ptr_CHDLC_Rx_stat_el_cfg_struct % SDLA_WINDOWSIZE));
3136
3137 /* Setup Head and Tails for buffers */
3138 card->u.c.txbuf_base = (void *)(card->hw.dpmbase +
3139 (tx_config->base_addr_Tx_status_elements % SDLA_WINDOWSIZE));
3140 card->u.c.txbuf_last =
3141 (CHDLC_DATA_TX_STATUS_EL_STRUCT *)card->u.c.txbuf_base
3142 + (tx_config->number_Tx_status_elements - 1);
3143 card->u.c.rxbuf_base = (void *)(card->hw.dpmbase +
3144 (rx_config->base_addr_Rx_status_elements % SDLA_WINDOWSIZE));
3145 card->u.c.rxbuf_last =
3146 (CHDLC_DATA_RX_STATUS_EL_STRUCT *)card->u.c.rxbuf_base
3147 + (rx_config->number_Rx_status_elements - 1);
3148
3149 /* Set up next pointer to be used */
3150 card->u.c.txbuf = (void *)(card->hw.dpmbase +
3151 (tx_config->next_Tx_status_element_to_use % SDLA_WINDOWSIZE));
3152 card->u.c.rxmb = (void *)(card->hw.dpmbase +
3153 (rx_config->next_Rx_status_element_to_use % SDLA_WINDOWSIZE));
3154 }
3155
3156 /* Setup Actual Buffer Start and end addresses */
3157 card->u.c.rx_base = rx_config->base_addr_Rx_buffer;
3158 card->u.c.rx_top = rx_config->end_addr_Rx_buffer;
3159
3160}
3161
3162/*=============================================================================
3163 * Perform Interrupt Test by running READ_CHDLC_CODE_VERSION command MAX_INTR
3164 * _TEST_COUNTER times.
3165 */
3166static int intr_test( sdla_t* card)
3167{
3168 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
3169 int err,i;
3170
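	/* Intr_test_counter is presumably incremented by the interrupt
	 * handler for each command-complete interrupt, allowing the caller
	 * to verify interrupt delivery against MAX_INTR_TEST_COUNTER. */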
3171 Intr_test_counter = 0;
3172
3173 err = chdlc_set_intr_mode(card, APP_INT_ON_COMMAND_COMPLETE);
3174
3175 if (err == CMD_OK) {
3176 for (i = 0; i < MAX_INTR_TEST_COUNTER; i ++) {
3177 mb->buffer_length = 0;
3178 mb->command = READ_CHDLC_CODE_VERSION;
3179 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
3180 if (err != CMD_OK)
3181 chdlc_error(card, err, mb);
3182 }
3183 }
3184 else {
3185 return err;
3186 }
3187
3188 err = chdlc_set_intr_mode(card, 0);
3189
3190 if (err != CMD_OK)
3191 return err;
3192
3193 return 0;
3194}
3195
3196/*==============================================================================
3197 * Determine what type of UDP call it is. CPIPEAB ?
3198 */
3199static int udp_pkt_type(struct sk_buff *skb, sdla_t* card)
3200{
3201 chdlc_udp_pkt_t *chdlc_udp_pkt = (chdlc_udp_pkt_t *)skb->data;
3202
3203#ifdef _WAN_UDP_DEBUG
3204 printk(KERN_INFO "SIG %s = %s\n\
3205 UPP %x = %x\n\
3206 PRT %x = %x\n\
3207 REQ %i = %i\n\
3208 36 th = %x 37th = %x\n",
3209 chdlc_udp_pkt->wp_mgmt.signature,
3210 UDPMGMT_SIGNATURE,
3211 chdlc_udp_pkt->udp_pkt.udp_dst_port,
3212 ntohs(card->wandev.udp_port),
3213 chdlc_udp_pkt->ip_pkt.protocol,
3214 UDPMGMT_UDP_PROTOCOL,
3215 chdlc_udp_pkt->wp_mgmt.request_reply,
3216 UDPMGMT_REQUEST,
3217 skb->data[36], skb->data[37]);
3218#endif
3219
3220 if (!strncmp(chdlc_udp_pkt->wp_mgmt.signature,UDPMGMT_SIGNATURE,8) &&
3221 (chdlc_udp_pkt->udp_pkt.udp_dst_port == ntohs(card->wandev.udp_port)) &&
3222 (chdlc_udp_pkt->ip_pkt.protocol == UDPMGMT_UDP_PROTOCOL) &&
3223 (chdlc_udp_pkt->wp_mgmt.request_reply == UDPMGMT_REQUEST)) {
3224
3225 return UDP_CPIPE_TYPE;
3226
3227 }else{
3228 return UDP_INVALID_TYPE;
3229 }
3230}
3231
3232/*============================================================================
3233 * Set PORT state.
3234 */
3235static void port_set_state (sdla_t *card, int state)
3236{
3237 if (card->u.c.state != state)
3238 {
3239 switch (state)
3240 {
3241 case WAN_CONNECTED:
3242 printk (KERN_INFO "%s: Link connected!\n",
3243 card->devname);
3244 break;
3245
3246 case WAN_CONNECTING:
3247 printk (KERN_INFO "%s: Link connecting...\n",
3248 card->devname);
3249 break;
3250
3251 case WAN_DISCONNECTED:
3252 printk (KERN_INFO "%s: Link disconnected!\n",
3253 card->devname);
3254 break;
3255 }
3256
3257 card->wandev.state = card->u.c.state = state;
3258 if (card->wandev.dev){
3259 struct net_device *dev = card->wandev.dev;
3260 chdlc_private_area_t *chdlc_priv_area = dev->priv;
3261 chdlc_priv_area->common.state = state;
3262 }
3263 }
3264}
3265
3266/*===========================================================================
3267 * config_chdlc
3268 *
3269 * Configure the chdlc protocol and enable communications.
3270 *
3271 * The if_open() function binds this function to the poll routine.
3272 * Therefore, this function will run every time the chdlc interface
3273 * is brought up. We cannot run this function from the if_open
3274 * because if_open does not have access to the remote IP address.
3275 *
3276 * If the communications are not enabled, proceed to configure
3277 * the card and enable communications.
3278 *
3279 * If the communications are enabled, it means that the interface
 3280  * was shut down by either the user or the driver. In this case, we
3281 * have to check that the IP addresses have not changed. If
3282 * the IP addresses have changed, we have to reconfigure the firmware
3283 * and update the changed IP addresses. Otherwise, just exit.
3284 *
3285 */
3286
3287static int config_chdlc (sdla_t *card)
3288{
3289 struct net_device *dev = card->wandev.dev;
3290 chdlc_private_area_t *chdlc_priv_area = dev->priv;
3291 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
3292
3293 if (card->u.c.comm_enabled){
3294
3295 /* Jun 20. 2000: NC
3296 * IP addresses are not used in the API mode */
3297
3298 if ((chdlc_priv_area->ip_local_tmp != chdlc_priv_area->ip_local ||
3299 chdlc_priv_area->ip_remote_tmp != chdlc_priv_area->ip_remote) &&
3300 card->u.c.usedby == WANPIPE) {
3301
 3302 			/* The IP addresses have changed; we must
3303 * stop the communications and reconfigure
3304 * the card. Reason: the firmware must know
3305 * the local and remote IP addresses. */
3306 disable_comm(card);
3307 port_set_state(card, WAN_DISCONNECTED);
3308 printk(KERN_INFO
3309 "%s: IP addresses changed!\n",
3310 card->devname);
3311 printk(KERN_INFO
3312 "%s: Restarting communications ...\n",
3313 card->devname);
3314 }else{
3315 /* IP addresses are the same and the link is up,
3316 * we don't have to do anything here. Therefore, exit */
3317 return 0;
3318 }
3319 }
3320
3321 chdlc_priv_area->ip_local = chdlc_priv_area->ip_local_tmp;
3322 chdlc_priv_area->ip_remote = chdlc_priv_area->ip_remote_tmp;
3323
3324
3325 /* Setup the Board for asynchronous mode */
3326 if (card->u.c.async_mode){
3327
3328 if (set_asy_config(card)) {
3329 printk (KERN_INFO "%s: Failed CHDLC Async configuration!\n",
3330 card->devname);
3331 return 0;
3332 }
3333 }else{
3334 /* Setup the Board for CHDLC */
3335 if (set_chdlc_config(card)) {
3336 printk (KERN_INFO "%s: Failed CHDLC configuration!\n",
3337 card->devname);
3338 return 0;
3339 }
3340 }
3341
3342 /* Set interrupt mode and mask */
3343 if (chdlc_set_intr_mode(card, APP_INT_ON_RX_FRAME |
3344 APP_INT_ON_GLOBAL_EXCEP_COND |
3345 APP_INT_ON_TX_FRAME |
3346 APP_INT_ON_CHDLC_EXCEP_COND | APP_INT_ON_TIMER)){
3347 printk (KERN_INFO "%s: Failed to set interrupt triggers!\n",
3348 card->devname);
3349 return 0;
3350 }
3351
3352
3353 /* Mask the Transmit and Timer interrupt */
3354 flags->interrupt_info_struct.interrupt_permission &=
3355 ~(APP_INT_ON_TX_FRAME | APP_INT_ON_TIMER);
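	/* These are unmasked on demand: wanpipe_tty_trigger_tx_irq()
	 * re-enables the TX interrupt when a send must be retried, and
	 * chdlc_poll() re-enables the timer interrupt when deferred
	 * configuration work is queued. */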
3356
3357 /* In TTY mode, receive interrupt will be enabled during
3358 * wanpipe_tty_open() operation */
3359 if (card->tty_opt){
3360 flags->interrupt_info_struct.interrupt_permission &= ~APP_INT_ON_RX_FRAME;
3361 }
3362
3363 /* Enable communications */
3364 if (card->u.c.async_mode){
3365 if (asy_comm_enable(card) != 0) {
 3366 			printk(KERN_INFO "%s: Failed to enable async communication!\n",
3367 card->devname);
3368 flags->interrupt_info_struct.interrupt_permission = 0;
3369 card->u.c.comm_enabled=0;
3370 chdlc_set_intr_mode(card,0);
3371 return 0;
3372 }
3373 }else{
3374 if (chdlc_comm_enable(card) != 0) {
3375 printk(KERN_INFO "%s: Failed to enable chdlc communications!\n",
3376 card->devname);
3377 flags->interrupt_info_struct.interrupt_permission = 0;
3378 card->u.c.comm_enabled=0;
3379 chdlc_set_intr_mode(card,0);
3380 return 0;
3381 }
3382 }
3383
3384 /* Initialize Rx/Tx buffer control fields */
3385 init_chdlc_tx_rx_buff(card);
3386 port_set_state(card, WAN_CONNECTING);
3387 return 0;
3388}
3389
3390
3391/*============================================================
3392 * chdlc_poll
3393 *
3394 * Rationale:
 3395  *	We cannot manipulate the routing tables or
 3396  *	IP addresses within an interrupt. Therefore
 3397  *	we must perform such actions outside interrupt
 3398  *	context, at a later time.
3399 *
3400 * Description:
3401 * CHDLC polling routine, responsible for
3402 * shutting down interfaces upon disconnect
3403 * and adding/removing routes.
3404 *
3405 * Usage:
3406 * This function is executed for each CHDLC
3407 * interface through a tq_schedule bottom half.
3408 *
 3409  * 	The trigger_chdlc_poll() function is used to kick
 3410  * 	the chdlc_poll() routine.
3411 */
3412
3413static void chdlc_poll(struct net_device *dev)
3414{
3415 chdlc_private_area_t *chdlc_priv_area;
3416 sdla_t *card;
3417 u8 check_gateway=0;
3418 SHARED_MEMORY_INFO_STRUCT* flags;
3419
3420
3421 if (!dev || (chdlc_priv_area=dev->priv) == NULL)
3422 return;
3423
3424 card = chdlc_priv_area->card;
3425 flags = card->u.c.flags;
3426
 3427 	/* (Re)configuration is in progress; stop what we are
 3428 	 * doing and get out */
3429 if (test_bit(PERI_CRIT,&card->wandev.critical)){
3430 clear_bit(POLL_CRIT,&card->wandev.critical);
3431 return;
3432 }
3433
3434 /* if_open() function has triggered the polling routine
3435 * to determine the configured IP addresses. Once the
3436 * addresses are found, trigger the chdlc configuration */
3437 if (test_bit(0,&chdlc_priv_area->config_chdlc)){
3438
3439 chdlc_priv_area->ip_local_tmp = get_ip_address(dev,WAN_LOCAL_IP);
3440 chdlc_priv_area->ip_remote_tmp = get_ip_address(dev,WAN_POINTOPOINT_IP);
3441
3442 /* Jun 20. 2000 Bug Fix
3443 * Only perform this check in WANPIPE mode, since
3444 * IP addresses are not used in the API mode. */
3445
3446 if (chdlc_priv_area->ip_local_tmp == chdlc_priv_area->ip_remote_tmp &&
3447 card->u.c.slarp_timer == 0x00 &&
3448 !card->u.c.backup &&
3449 card->u.c.usedby == WANPIPE){
3450
3451 if (++chdlc_priv_area->ip_error > MAX_IP_ERRORS){
3452 printk(KERN_INFO "\n%s: --- WARNING ---\n",
3453 card->devname);
3454 printk(KERN_INFO
3455 "%s: The local IP address is the same as the\n",
3456 card->devname);
3457 printk(KERN_INFO
3458 "%s: Point-to-Point IP address.\n",
3459 card->devname);
3460 printk(KERN_INFO "%s: --- WARNING ---\n\n",
3461 card->devname);
3462 }else{
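				/* Addresses still match but the error limit has
				 * not been reached: retry the check in one
				 * second instead of configuring now. */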
3463 clear_bit(POLL_CRIT,&card->wandev.critical);
3464 chdlc_priv_area->poll_delay_timer.expires = jiffies+HZ;
3465 add_timer(&chdlc_priv_area->poll_delay_timer);
3466 return;
3467 }
3468 }
3469
3470 clear_bit(0,&chdlc_priv_area->config_chdlc);
3471 clear_bit(POLL_CRIT,&card->wandev.critical);
3472
3473 chdlc_priv_area->timer_int_enabled |= TMR_INT_ENABLED_CONFIG;
3474 flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
3475 return;
3476 }
3477 /* Dynamic interface implementation, as well as dynamic
3478 * routing. */
3479
3480 switch (card->u.c.state){
3481
3482 case WAN_DISCONNECTED:
3483
 3484 		/* If dynamic interface configuration is on and the interface
 3485 		 * is up, then bring down the network interface */
3486
3487 if (test_bit(DYN_OPT_ON,&chdlc_priv_area->interface_down) &&
3488 !test_bit(DEV_DOWN, &chdlc_priv_area->interface_down) &&
3489 card->wandev.dev->flags & IFF_UP){
3490
3491 printk(KERN_INFO "%s: Interface %s down.\n",
3492 card->devname,card->wandev.dev->name);
3493 change_dev_flags(card->wandev.dev,(card->wandev.dev->flags&~IFF_UP));
3494 set_bit(DEV_DOWN,&chdlc_priv_area->interface_down);
3495 chdlc_priv_area->route_status = NO_ROUTE;
3496
3497 }else{
3498 /* We need to check if the local IP address is
3499 * zero. If it is, we shouldn't try to remove it.
3500 */
3501
3502 if (card->wandev.dev->flags & IFF_UP &&
3503 get_ip_address(card->wandev.dev,WAN_LOCAL_IP) &&
3504 chdlc_priv_area->route_status != NO_ROUTE &&
3505 card->u.c.slarp_timer){
3506
3507 process_route(card);
3508 }
3509 }
3510 break;
3511
3512 case WAN_CONNECTED:
3513
 3514 		/* On an SMP machine this code can execute before the interface
 3515 		 * comes up.  In that case, we must make sure that we do not
 3516 		 * try to bring up the interface before dev_open() has finished */
3517
3518
3519 /* DEV_DOWN will be set only when we bring down the interface
3520 * for the very first time. This way we know that it was us
3521 * that brought the interface down */
3522
3523 if (test_bit(DYN_OPT_ON,&chdlc_priv_area->interface_down) &&
3524 test_bit(DEV_DOWN, &chdlc_priv_area->interface_down) &&
3525 !(card->wandev.dev->flags & IFF_UP)){
3526
3527 printk(KERN_INFO "%s: Interface %s up.\n",
3528 card->devname,card->wandev.dev->name);
3529 change_dev_flags(card->wandev.dev,(card->wandev.dev->flags|IFF_UP));
3530 clear_bit(DEV_DOWN,&chdlc_priv_area->interface_down);
3531 check_gateway=1;
3532 }
3533
3534 if (chdlc_priv_area->route_status == ADD_ROUTE &&
3535 card->u.c.slarp_timer){
3536
3537 process_route(card);
3538 check_gateway=1;
3539 }
3540
3541 if (chdlc_priv_area->gateway && check_gateway)
3542 add_gateway(card,dev);
3543
3544 break;
3545 }
3546
3547 clear_bit(POLL_CRIT,&card->wandev.critical);
3548}
3549
3550/*============================================================
3551 * trigger_chdlc_poll
3552 *
3553 * Description:
3554 * Add a chdlc_poll() work entry into the keventd work queue
 3555  *      for a specific interface.  This will kick
 3556  *      the chdlc_poll() routine at a later time.
3557 *
3558 * Usage:
 3559  * 	Interrupts use this to defer a task to
 3560  *      the polling routine.
3561 *
3562 */
3563static void trigger_chdlc_poll(struct net_device *dev)
3564{
3565 chdlc_private_area_t *chdlc_priv_area;
3566 sdla_t *card;
3567
3568 if (!dev)
3569 return;
3570
3571 if ((chdlc_priv_area = dev->priv)==NULL)
3572 return;
3573
3574 card = chdlc_priv_area->card;
3575
3576 if (test_and_set_bit(POLL_CRIT,&card->wandev.critical)){
3577 return;
3578 }
3579 if (test_bit(PERI_CRIT,&card->wandev.critical)){
3580 return;
3581 }
3582 schedule_work(&chdlc_priv_area->poll_work);
3583}
3584
3585
3586static void chdlc_poll_delay (unsigned long dev_ptr)
3587{
3588 struct net_device *dev = (struct net_device *)dev_ptr;
3589 trigger_chdlc_poll(dev);
3590}
3591
3592
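/* Lock this card and, if present, card->next (presumably the second port
 * sharing the same S508 adapter) so that hardware accesses are serialized
 * across both ports. */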
3593void s508_lock (sdla_t *card, unsigned long *smp_flags)
3594{
3595 spin_lock_irqsave(&card->wandev.lock, *smp_flags);
3596 if (card->next){
3597 spin_lock(&card->next->wandev.lock);
3598 }
3599}
3600
3601void s508_unlock (sdla_t *card, unsigned long *smp_flags)
3602{
3603 if (card->next){
3604 spin_unlock(&card->next->wandev.lock);
3605 }
3606 spin_unlock_irqrestore(&card->wandev.lock, *smp_flags);
3607}
3608
3609//*********** TTY SECTION ****************
3610
3611static void wanpipe_tty_trigger_tx_irq(sdla_t *card)
3612{
3613 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
3614 INTERRUPT_INFORMATION_STRUCT *chdlc_int = &flags->interrupt_info_struct;
3615 chdlc_int->interrupt_permission |= APP_INT_ON_TX_FRAME;
3616}
3617
3618static void wanpipe_tty_trigger_poll(sdla_t *card)
3619{
3620 schedule_work(&card->tty_work);
3621}
3622
3623static void tty_poll_work (void* data)
3624{
3625 sdla_t *card = (sdla_t*)data;
3626 struct tty_struct *tty;
3627
3628 if ((tty=card->tty)==NULL)
3629 return;
3630
3631 tty_wakeup(tty);
3632#if defined(SERIAL_HAVE_POLL_WAIT)
3633 wake_up_interruptible(&tty->poll_wait);
3634#endif
3635 return;
3636}
3637
3638static void wanpipe_tty_close(struct tty_struct *tty, struct file * filp)
3639{
3640 sdla_t *card;
3641 unsigned long smp_flags;
3642
3643 if (!tty || !tty->driver_data){
3644 return;
3645 }
3646
3647 card = (sdla_t*)tty->driver_data;
3648
3649 if (!card)
3650 return;
3651
3652 printk(KERN_INFO "%s: Closing TTY Driver!\n",
3653 card->devname);
3654
3655 /* Sanity Check */
3656 if (!card->tty_open)
3657 return;
3658
3659 wanpipe_close(card);
3660 if (--card->tty_open == 0){
3661
3662 lock_adapter_irq(&card->wandev.lock,&smp_flags);
3663 card->tty=NULL;
3664 chdlc_disable_comm_shutdown(card);
3665 unlock_adapter_irq(&card->wandev.lock,&smp_flags);
3666
3667 if (card->tty_buf){
3668 kfree(card->tty_buf);
3669 card->tty_buf=NULL;
3670 }
3671
3672 if (card->tty_rx){
3673 kfree(card->tty_rx);
3674 card->tty_rx=NULL;
3675 }
3676 }
3677 return;
3678}
3679static int wanpipe_tty_open(struct tty_struct *tty, struct file * filp)
3680{
3681 unsigned long smp_flags;
3682 sdla_t *card;
3683
3684 if (!tty){
3685 return -ENODEV;
3686 }
3687
3688 if (!tty->driver_data){
3689 int port;
3690 port = tty->index;
3691 if ((port < 0) || (port >= NR_PORTS))
3692 return -ENODEV;
3693
3694 tty->driver_data = WAN_CARD(port);
3695 if (!tty->driver_data)
3696 return -ENODEV;
3697 }
3698
3699 card = (sdla_t*)tty->driver_data;
3700
 3701 	if (!card){
 3702 		/* The card pointer is NULL: nothing can be locked or cleaned
 3703 		 * up here, so just fail the open */
 3704 		return -ENODEV;
 3705 	}
3707
3708 printk(KERN_INFO "%s: Opening TTY Driver!\n",
3709 card->devname);
3710
3711 if (card->tty_open == 0){
3712 lock_adapter_irq(&card->wandev.lock,&smp_flags);
3713 card->tty=tty;
3714 unlock_adapter_irq(&card->wandev.lock,&smp_flags);
3715
3716 if (!card->tty_buf){
3717 card->tty_buf = kmalloc(TTY_CHDLC_MAX_MTU, GFP_KERNEL);
3718 if (!card->tty_buf){
3719 card->tty_buf=NULL;
3720 card->tty=NULL;
3721 return -ENOMEM;
3722 }
3723 }
3724
3725 if (!card->tty_rx){
3726 card->tty_rx = kmalloc(TTY_CHDLC_MAX_MTU, GFP_KERNEL);
3727 if (!card->tty_rx){
3728 /* Free the buffer above */
3729 kfree(card->tty_buf);
3730 card->tty_buf=NULL;
3731 card->tty=NULL;
3732 return -ENOMEM;
3733 }
3734 }
3735 }
3736
3737 ++card->tty_open;
3738 wanpipe_open(card);
3739 return 0;
3740}
3741
3742static int wanpipe_tty_write(struct tty_struct * tty, const unsigned char *buf, int count)
3743{
3744 unsigned long smp_flags=0;
3745 sdla_t *card=NULL;
3746
3747 if (!tty){
3748 dbg_printk(KERN_INFO "NO TTY in Write\n");
3749 return -ENODEV;
3750 }
3751
3752 card = (sdla_t *)tty->driver_data;
3753
3754 if (!card){
3755 dbg_printk(KERN_INFO "No Card in TTY Write\n");
3756 return -ENODEV;
3757 }
3758
3759 if (count > card->wandev.mtu){
3760 dbg_printk(KERN_INFO "Frame too big in Write %i Max: %i\n",
3761 count,card->wandev.mtu);
3762 return -EINVAL;
3763 }
3764
3765 if (card->wandev.state != WAN_CONNECTED){
3766 dbg_printk(KERN_INFO "Card not connected in TTY Write\n");
3767 return -EINVAL;
3768 }
3769
3770 /* Lock the 508 Card: SMP is supported */
3771 if(card->hw.type != SDLA_S514){
3772 s508_lock(card,&smp_flags);
3773 }
3774
3775 if (test_and_set_bit(SEND_CRIT,(void*)&card->wandev.critical)){
3776 printk(KERN_INFO "%s: Critical in TTY Write\n",
3777 card->devname);
3778
3779 /* Lock the 508 Card: SMP is supported */
3780 if(card->hw.type != SDLA_S514)
3781 s508_unlock(card,&smp_flags);
3782
3783 return -EINVAL;
3784 }
3785
3786 if (chdlc_send(card,(void*)buf,count)){
3787 dbg_printk(KERN_INFO "%s: Failed to send, retry later: kernel!\n",
3788 card->devname);
3789 clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
3790
3791 wanpipe_tty_trigger_tx_irq(card);
3792
3793 if(card->hw.type != SDLA_S514)
3794 s508_unlock(card,&smp_flags);
3795 return 0;
3796 }
3797 dbg_printk(KERN_INFO "%s: Packet sent OK: %i\n",card->devname,count);
3798 clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
3799
3800 if(card->hw.type != SDLA_S514)
3801 s508_unlock(card,&smp_flags);
3802
3803 return count;
3804}
3805
3806static void wanpipe_tty_receive(sdla_t *card, unsigned addr, unsigned int len)
3807{
3808 unsigned offset=0;
3809 unsigned olen=len;
3810 char fp=0;
3811 struct tty_struct *tty;
3812 int i;
3813 struct tty_ldisc *ld;
3814
3815 if (!card->tty_open){
3816 dbg_printk(KERN_INFO "%s: TTY not open during receive\n",
3817 card->devname);
3818 return;
3819 }
3820
3821 if ((tty=card->tty) == NULL){
3822 dbg_printk(KERN_INFO "%s: No TTY on receive\n",
3823 card->devname);
3824 return;
3825 }
3826
3827 if (!tty->driver_data){
3828 dbg_printk(KERN_INFO "%s: No Driver Data, or Flip on receive\n",
3829 card->devname);
3830 return;
3831 }
3832
3833
3834 if (card->u.c.async_mode){
3835 if ((tty->flip.count+len) >= TTY_FLIPBUF_SIZE){
3836 if (net_ratelimit()){
3837 printk(KERN_INFO
3838 "%s: Received packet size too big: %i bytes, Max: %i!\n",
3839 card->devname,len,TTY_FLIPBUF_SIZE);
3840 }
3841 return;
3842 }
3843
3844
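		/* The received data may wrap around the end of the on-board
		 * circular receive buffer: copy the tail piece first, then
		 * continue from rx_base. */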
3845 if((addr + len) > card->u.c.rx_top + 1) {
3846 offset = card->u.c.rx_top - addr + 1;
3847
3848 sdla_peek(&card->hw, addr, tty->flip.char_buf_ptr, offset);
3849
3850 addr = card->u.c.rx_base;
3851 len -= offset;
3852
3853 tty->flip.char_buf_ptr+=offset;
3854 tty->flip.count+=offset;
3855 for (i=0;i<offset;i++){
3856 *tty->flip.flag_buf_ptr = 0;
3857 tty->flip.flag_buf_ptr++;
3858 }
3859 }
3860
3861 sdla_peek(&card->hw, addr, tty->flip.char_buf_ptr, len);
3862
3863 tty->flip.char_buf_ptr+=len;
 3864 		tty->flip.count+=len;
3865 for (i=0;i<len;i++){
3866 *tty->flip.flag_buf_ptr = 0;
3867 tty->flip.flag_buf_ptr++;
3868 }
3869
3870 tty->low_latency=1;
3871 tty_flip_buffer_push(tty);
3872 }else{
3873 if (!card->tty_rx){
3874 if (net_ratelimit()){
3875 printk(KERN_INFO
3876 "%s: Receive sync buffer not available!\n",
3877 card->devname);
3878 }
3879 return;
3880 }
3881
3882 if (len > TTY_CHDLC_MAX_MTU){
3883 if (net_ratelimit()){
3884 printk(KERN_INFO
3885 "%s: Received packet size too big: %i bytes, Max: %i!\n",
 3886 				card->devname,len,TTY_CHDLC_MAX_MTU);
3887 }
3888 return;
3889 }
3890
3891
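		/* Handle wrap-around of the on-board circular receive buffer,
		 * as in the async path above. */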
3892 if((addr + len) > card->u.c.rx_top + 1) {
3893 offset = card->u.c.rx_top - addr + 1;
3894
3895 sdla_peek(&card->hw, addr, card->tty_rx, offset);
3896
3897 addr = card->u.c.rx_base;
3898 len -= offset;
3899 }
3900 sdla_peek(&card->hw, addr, card->tty_rx+offset, len);
3901 ld = tty_ldisc_ref(tty);
3902 if (ld) {
3903 if (ld->receive_buf)
3904 ld->receive_buf(tty,card->tty_rx,&fp,olen);
3905 tty_ldisc_deref(ld);
3906 }else{
3907 if (net_ratelimit()){
3908 printk(KERN_INFO
3909 "%s: NO TTY Sync line discipline!\n",
3910 card->devname);
3911 }
3912 }
3913 }
3914
3915 dbg_printk(KERN_INFO "%s: Received Data %i\n",card->devname,olen);
3916 return;
3917}
3918
3919#if 0
3920static int wanpipe_tty_ioctl(struct tty_struct *tty, struct file * file,
3921 unsigned int cmd, unsigned long arg)
3922{
3923 return -ENOIOCTLCMD;
3924}
3925#endif
3926
3927static void wanpipe_tty_stop(struct tty_struct *tty)
3928{
3929 return;
3930}
3931
3932static void wanpipe_tty_start(struct tty_struct *tty)
3933{
3934 return;
3935}
3936
3937static int config_tty (sdla_t *card)
3938{
3939 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
3940
3941 /* Setup the Board for asynchronous mode */
3942 if (card->u.c.async_mode){
3943
3944 if (set_asy_config(card)) {
3945 printk (KERN_INFO "%s: Failed CHDLC Async configuration!\n",
3946 card->devname);
3947 return -EINVAL;
3948 }
3949 }else{
3950 /* Setup the Board for CHDLC */
3951 if (set_chdlc_config(card)) {
3952 printk (KERN_INFO "%s: Failed CHDLC configuration!\n",
3953 card->devname);
3954 return -EINVAL;
3955 }
3956 }
3957
3958 /* Set interrupt mode and mask */
3959 if (chdlc_set_intr_mode(card, APP_INT_ON_RX_FRAME |
3960 APP_INT_ON_GLOBAL_EXCEP_COND |
3961 APP_INT_ON_TX_FRAME |
3962 APP_INT_ON_CHDLC_EXCEP_COND | APP_INT_ON_TIMER)){
3963 printk (KERN_INFO "%s: Failed to set interrupt triggers!\n",
3964 card->devname);
3965 return -EINVAL;
3966 }
3967
3968
3969 /* Mask the Transmit and Timer interrupt */
3970 flags->interrupt_info_struct.interrupt_permission &=
3971 ~(APP_INT_ON_TX_FRAME | APP_INT_ON_TIMER);
3972
3973
3974 /* Enable communications */
3975 if (card->u.c.async_mode){
3976 if (asy_comm_enable(card) != 0) {
 3977 			printk(KERN_INFO "%s: Failed to enable async communication!\n",
3978 card->devname);
3979 flags->interrupt_info_struct.interrupt_permission = 0;
3980 card->u.c.comm_enabled=0;
3981 chdlc_set_intr_mode(card,0);
3982 return -EINVAL;
3983 }
3984 }else{
3985 if (chdlc_comm_enable(card) != 0) {
3986 printk(KERN_INFO "%s: Failed to enable chdlc communications!\n",
3987 card->devname);
3988 flags->interrupt_info_struct.interrupt_permission = 0;
3989 card->u.c.comm_enabled=0;
3990 chdlc_set_intr_mode(card,0);
3991 return -EINVAL;
3992 }
3993 }
3994
3995 /* Initialize Rx/Tx buffer control fields */
3996 init_chdlc_tx_rx_buff(card);
3997 port_set_state(card, WAN_CONNECTING);
3998 return 0;
3999}
4000
4001
4002static int change_speed(sdla_t *card, struct tty_struct *tty,
4003 struct termios *old_termios)
4004{
4005 int baud, ret=0;
4006 unsigned cflag;
4007 int dbits,sbits,parity,handshaking;
4008
4009 cflag = tty->termios->c_cflag;
4010
4011 /* There is always one stop bit */
4012 sbits=WANOPT_ONE;
4013
4014 /* Parity is defaulted to NONE */
4015 parity = WANOPT_NONE;
4016
4017 handshaking=0;
4018
4019 /* byte size and parity */
4020 switch (cflag & CSIZE) {
4021 case CS5: dbits = 5; break;
4022 case CS6: dbits = 6; break;
4023 case CS7: dbits = 7; break;
4024 case CS8: dbits = 8; break;
4025 /* Never happens, but GCC is too dumb to figure it out */
4026 default: dbits = 8; break;
4027 }
4028
 4029 	/* If CSTOPB is set, use two stop bits: the maximum
 4030 	 * number of stop bits supported */
4031 if (cflag & CSTOPB) {
4032 sbits = WANOPT_TWO;
4033 }
4034 if (cflag & PARENB) {
4035 parity = WANOPT_EVEN;
4036 }
4037 if (cflag & PARODD){
4038 parity = WANOPT_ODD;
4039 }
4040
4041 /* Determine divisor based on baud rate */
4042 baud = tty_get_baud_rate(tty);
4043
4044 if (!baud)
4045 baud = 9600; /* B0 transition handled in rs_set_termios */
4046
4047 if (cflag & CRTSCTS) {
4048 handshaking|=ASY_RTS_HS_FOR_RX;
4049 }
4050
4051 if (I_IGNPAR(tty))
4052 parity = WANOPT_NONE;
4053
4054 if (I_IXOFF(tty)){
4055 handshaking|=ASY_XON_XOFF_HS_FOR_RX;
4056 handshaking|=ASY_XON_XOFF_HS_FOR_TX;
4057 }
4058
4059 if (I_IXON(tty)){
4060 handshaking|=ASY_XON_XOFF_HS_FOR_RX;
4061 handshaking|=ASY_XON_XOFF_HS_FOR_TX;
4062 }
4063
4064 if (card->u.c.async_mode){
4065 if (card->wandev.bps != baud)
4066 ret=1;
4067 card->wandev.bps = baud;
4068 }
4069
4070 if (card->u.c.async_mode){
4071 if (card->u.c.protocol_options != handshaking)
4072 ret=1;
4073 card->u.c.protocol_options = handshaking;
4074
4075 if (card->u.c.tx_bits_per_char != dbits)
4076 ret=1;
4077 card->u.c.tx_bits_per_char = dbits;
4078
4079 if (card->u.c.rx_bits_per_char != dbits)
4080 ret=1;
4081 card->u.c.rx_bits_per_char = dbits;
4082
4083 if (card->u.c.stop_bits != sbits)
4084 ret=1;
4085 card->u.c.stop_bits = sbits;
4086
4087 if (card->u.c.parity != parity)
4088 ret=1;
4089 card->u.c.parity = parity;
4090
4091 card->u.c.break_timer = 50;
4092 card->u.c.inter_char_timer = 10;
4093 card->u.c.rx_complete_length = 100;
4094 card->u.c.xon_char = 0xFE;
4095 }else{
4096 card->u.c.protocol_options = HDLC_STREAMING_MODE;
4097 }
4098
4099 return ret;
4100}
4101
4102
4103static void wanpipe_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
4104{
4105 sdla_t *card;
4106 int err=1;
4107
4108 if (!tty){
4109 return;
4110 }
4111
4112 card = (sdla_t *)tty->driver_data;
4113
4114 if (!card)
4115 return;
4116
4117 if (change_speed(card, tty, old_termios) || !card->u.c.comm_enabled){
4118 unsigned long smp_flags;
4119
4120 if (card->u.c.comm_enabled){
4121 lock_adapter_irq(&card->wandev.lock,&smp_flags);
4122 chdlc_disable_comm_shutdown(card);
4123 unlock_adapter_irq(&card->wandev.lock,&smp_flags);
4124 }
4125 lock_adapter_irq(&card->wandev.lock,&smp_flags);
4126 err = config_tty(card);
4127 unlock_adapter_irq(&card->wandev.lock,&smp_flags);
4128 if (card->u.c.async_mode){
4129 printk(KERN_INFO "%s: TTY Async Configuration:\n"
4130 " Baud =%i\n"
4131 " Handshaking =%s\n"
4132 " Tx Dbits =%i\n"
4133 " Rx Dbits =%i\n"
4134 " Parity =%s\n"
4135 " Stop Bits =%i\n",
4136 card->devname,
4137 card->wandev.bps,
4138 opt_decode[card->u.c.protocol_options],
4139 card->u.c.tx_bits_per_char,
4140 card->u.c.rx_bits_per_char,
4141 p_decode[card->u.c.parity] ,
4142 card->u.c.stop_bits);
4143 }else{
4144 printk(KERN_INFO "%s: TTY Sync Configuration:\n"
4145 " Baud =%i\n"
4146 " Protocol =HDLC_STREAMING\n",
4147 card->devname,card->wandev.bps);
4148 }
4149 if (!err){
4150 port_set_state(card,WAN_CONNECTED);
4151 }else{
4152 port_set_state(card,WAN_DISCONNECTED);
4153 }
4154 }
4155 return;
4156}
4157
4158static void wanpipe_tty_put_char(struct tty_struct *tty, unsigned char ch)
4159{
4160 sdla_t *card;
4161 unsigned long smp_flags=0;
4162
4163 if (!tty){
4164 return;
4165 }
4166
4167 card = (sdla_t *)tty->driver_data;
4168
4169 if (!card)
4170 return;
4171
4172 if (card->wandev.state != WAN_CONNECTED)
4173 return;
4174
4175 if(card->hw.type != SDLA_S514)
4176 s508_lock(card,&smp_flags);
4177
4178 if (test_and_set_bit(SEND_CRIT,(void*)&card->wandev.critical)){
4179
4180 wanpipe_tty_trigger_tx_irq(card);
4181
4182 if(card->hw.type != SDLA_S514)
4183 s508_unlock(card,&smp_flags);
4184 return;
4185 }
4186
4187 if (chdlc_send(card,(void*)&ch,1)){
4188 wanpipe_tty_trigger_tx_irq(card);
4189 dbg_printk("%s: Failed to TX char!\n",card->devname);
4190 }
4191
4192 dbg_printk("%s: Char TX OK\n",card->devname);
4193
4194 clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
4195
4196 if(card->hw.type != SDLA_S514)
4197 s508_unlock(card,&smp_flags);
4198
4199 return;
4200}
4201
4202static void wanpipe_tty_flush_chars(struct tty_struct *tty)
4203{
4204 return;
4205}
4206
4207static void wanpipe_tty_flush_buffer(struct tty_struct *tty)
4208{
4209 if (!tty)
4210 return;
4211
4212#if defined(SERIAL_HAVE_POLL_WAIT)
4213 wake_up_interruptible(&tty->poll_wait);
4214#endif
4215 tty_wakeup(tty);
4216 return;
4217}
4218
4219/*
4220 * This function is used to send a high-priority XON/XOFF character to
4221 * the device
4222 */
4223static void wanpipe_tty_send_xchar(struct tty_struct *tty, char ch)
4224{
4225 return;
4226}
4227
4228
4229static int wanpipe_tty_chars_in_buffer(struct tty_struct *tty)
4230{
4231 return 0;
4232}
4233
4234
4235static int wanpipe_tty_write_room(struct tty_struct *tty)
4236{
4237 sdla_t *card;
4238
4239 printk(KERN_INFO "TTY Write Room\n");
4240
4241 if (!tty){
4242 return 0;
4243 }
4244
4245 card = (sdla_t *)tty->driver_data;
4246 if (!card)
4247 return 0;
4248
4249 if (card->wandev.state != WAN_CONNECTED)
4250 return 0;
4251
4252 return SEC_MAX_NO_DATA_BYTES_IN_FRAME;
4253}
4254
4255
4256static int set_modem_status(sdla_t *card, unsigned char data)
4257{
4258 CHDLC_MAILBOX_STRUCT *mb = card->mbox;
4259 int err;
4260
4261 mb->buffer_length=1;
4262 mb->command=SET_MODEM_STATUS;
4263 mb->data[0]=data;
4264 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
4265 if (err != COMMAND_OK)
4266 chdlc_error (card, err, mb);
4267
4268 return err;
4269}
4270
4271static void wanpipe_tty_hangup(struct tty_struct *tty)
4272{
4273 sdla_t *card;
4274 unsigned long smp_flags;
4275
4276 printk(KERN_INFO "TTY Hangup!\n");
4277
4278 if (!tty){
4279 return;
4280 }
4281
4282 card = (sdla_t *)tty->driver_data;
4283 if (!card)
4284 return;
4285
4286 lock_adapter_irq(&card->wandev.lock,&smp_flags);
4287 set_modem_status(card,0);
4288 unlock_adapter_irq(&card->wandev.lock,&smp_flags);
4289 return;
4290}
4291
4292static void wanpipe_tty_break(struct tty_struct *tty, int break_state)
4293{
4294 return;
4295}
4296
4297static void wanpipe_tty_wait_until_sent(struct tty_struct *tty, int timeout)
4298{
4299 return;
4300}
4301
4302static void wanpipe_tty_throttle(struct tty_struct * tty)
4303{
4304 return;
4305}
4306
4307static void wanpipe_tty_unthrottle(struct tty_struct * tty)
4308{
4309 return;
4310}
4311
4312int wanpipe_tty_read_proc(char *page, char **start, off_t off, int count,
4313 int *eof, void *data)
4314{
4315 return 0;
4316}
4317
4318/*
4319 * The serial driver boot-time initialization code!
4320 */
4321int wanpipe_tty_init(sdla_t *card)
4322{
4323 struct serial_state * state;
4324
4325 /* Initialize the tty_driver structure */
4326
4327 if (card->tty_minor < 0 || card->tty_minor > NR_PORTS){
4328 printk(KERN_INFO "%s: Illegal Minor TTY number (0-4): %i\n",
4329 card->devname,card->tty_minor);
4330 return -EINVAL;
4331 }
4332
4333 if (WAN_CARD(card->tty_minor)){
4334 printk(KERN_INFO "%s: TTY Minor %i, already in use\n",
4335 card->devname,card->tty_minor);
4336 return -EBUSY;
4337 }
4338
4339 if (tty_init_cnt==0){
4340
4341 printk(KERN_INFO "%s: TTY %s Driver Init: Major %i, Minor Range %i-%i\n",
4342 card->devname,
4343 card->u.c.async_mode ? "ASYNC" : "SYNC",
4344 WAN_TTY_MAJOR,MIN_PORT,MAX_PORT);
4345
4346 tty_driver_mode = card->u.c.async_mode;
4347
4348 memset(&serial_driver, 0, sizeof(struct tty_driver));
4349 serial_driver.magic = TTY_DRIVER_MAGIC;
4350 serial_driver.owner = THIS_MODULE;
4351 serial_driver.driver_name = "wanpipe_tty";
4352 serial_driver.name = "ttyW";
4353 serial_driver.major = WAN_TTY_MAJOR;
4354 serial_driver.minor_start = WAN_TTY_MINOR;
4355 serial_driver.num = NR_PORTS;
4356 serial_driver.type = TTY_DRIVER_TYPE_SERIAL;
4357 serial_driver.subtype = SERIAL_TYPE_NORMAL;
4358
4359 serial_driver.init_termios = tty_std_termios;
4360 serial_driver.init_termios.c_cflag =
4361 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4362 serial_driver.flags = TTY_DRIVER_REAL_RAW;
4363
4364 serial_driver.refcount = 1; /* !@!@^#^&!! */
4365
4366 serial_driver.open = wanpipe_tty_open;
4367 serial_driver.close = wanpipe_tty_close;
4368 serial_driver.write = wanpipe_tty_write;
4369
4370 serial_driver.put_char = wanpipe_tty_put_char;
4371 serial_driver.flush_chars = wanpipe_tty_flush_chars;
4372 serial_driver.write_room = wanpipe_tty_write_room;
4373 serial_driver.chars_in_buffer = wanpipe_tty_chars_in_buffer;
4374 serial_driver.flush_buffer = wanpipe_tty_flush_buffer;
4375 //serial_driver.ioctl = wanpipe_tty_ioctl;
4376 serial_driver.throttle = wanpipe_tty_throttle;
4377 serial_driver.unthrottle = wanpipe_tty_unthrottle;
4378 serial_driver.send_xchar = wanpipe_tty_send_xchar;
4379 serial_driver.set_termios = wanpipe_tty_set_termios;
4380 serial_driver.stop = wanpipe_tty_stop;
4381 serial_driver.start = wanpipe_tty_start;
4382 serial_driver.hangup = wanpipe_tty_hangup;
4383 serial_driver.break_ctl = wanpipe_tty_break;
4384 serial_driver.wait_until_sent = wanpipe_tty_wait_until_sent;
4385 serial_driver.read_proc = wanpipe_tty_read_proc;
4386
4387 if (tty_register_driver(&serial_driver)){
4388 printk(KERN_INFO "%s: Failed to register serial driver!\n",
4389 card->devname);
4390 }
4391 }
4392
4393
 4394	/* The subsequent ports must comply with the initial configuration */
4395 if (tty_driver_mode != card->u.c.async_mode){
4396 printk(KERN_INFO "%s: Error: TTY Driver operation mode mismatch!\n",
4397 card->devname);
4398 printk(KERN_INFO "%s: The TTY driver is configured for %s!\n",
4399 card->devname, tty_driver_mode ? "ASYNC" : "SYNC");
4400 return -EINVAL;
4401 }
4402
4403 tty_init_cnt++;
4404
4405 printk(KERN_INFO "%s: Initializing TTY %s Driver Minor %i\n",
4406 card->devname,
4407 tty_driver_mode ? "ASYNC" : "SYNC",
4408 card->tty_minor);
4409
4410 tty_card_map[card->tty_minor] = card;
4411 state = &rs_table[card->tty_minor];
4412
4413 state->magic = SSTATE_MAGIC;
4414 state->line = 0;
4415 state->type = PORT_UNKNOWN;
4416 state->custom_divisor = 0;
4417 state->close_delay = 5*HZ/10;
4418 state->closing_wait = 30*HZ;
4419 state->icount.cts = state->icount.dsr =
4420 state->icount.rng = state->icount.dcd = 0;
4421 state->icount.rx = state->icount.tx = 0;
4422 state->icount.frame = state->icount.parity = 0;
4423 state->icount.overrun = state->icount.brk = 0;
4424 state->irq = card->wandev.irq;
4425
4426 INIT_WORK(&card->tty_work, tty_poll_work, (void*)card);
4427 return 0;
4428}
4429
4430
4431MODULE_LICENSE("GPL");
4432
4433/****** End ****************************************************************/
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
new file mode 100644
index 000000000000..2efccb0554c0
--- /dev/null
+++ b/drivers/net/wan/sdla_fr.c
@@ -0,0 +1,5068 @@
1/*****************************************************************************
2* sdla_fr.c WANPIPE(tm) Multiprotocol WAN Link Driver. Frame relay module.
3*
4* Author(s): Nenad Corbic <ncorbic@sangoma.com>
5* Gideon Hack
6*
7* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
8*
9* This program is free software; you can redistribute it and/or
10* modify it under the terms of the GNU General Public License
11* as published by the Free Software Foundation; either version
12* 2 of the License, or (at your option) any later version.
13* ============================================================================
14* Nov 23, 2000 Nenad Corbic o Added support for 2.4.X kernels
15* Nov 15, 2000 David Rokavarg
16* Nenad Corbic o Added frame relay bridging support.
17* Original code from Mark Wells and Kristian Hoffmann has
18* been integrated into the frame relay driver.
19* Nov 13, 2000 Nenad Corbic o Added true interface type encoding option.
  20*				Tcpdump doesn't support Frame Relay interface
21* types, to fix this true type option will set
22* the interface type to RAW IP mode.
23* Nov 07, 2000 Nenad Corbic o Added security features for UDP debugging:
24* Deny all and specify allowed requests.
25* Nov 06, 2000 Nenad Corbic o Wanpipe interfaces conform to raw packet interfaces.
26* Moved the if_header into the if_send() routine.
27* The if_header() was breaking the libpcap
28* support. i.e. support for tcpdump, ethereal ...
29* Oct 12. 2000 Nenad Corbic o Added error message in fr_configure
30* Jul 31, 2000 Nenad Corbic o Fixed the Router UP Time.
31* Apr 28, 2000 Nenad Corbic o Added the option to shutdown an interface
32* when the channel gets disconnected.
33* Apr 28, 2000 Nenad Corbic o Added M.Grants patch: disallow duplicate
34* interface setups.
35* Apr 25, 2000 Nenad Corbic o Added M.Grants patch: dynamically add/remove
36* new dlcis/interfaces.
37* Mar 23, 2000 Nenad Corbic o Improved task queue, bh handling.
38* Mar 16, 2000 Nenad Corbic o Added Inverse ARP support
39* Mar 13, 2000 Nenad Corbic o Added new socket API support.
40* Mar 06, 2000 Nenad Corbic o Bug Fix: corrupted mbox recovery.
41* Feb 24, 2000 Nenad Corbic o Fixed up FT1 UDP debugging problem.
  42* Dec 15, 1999 Nenad Corbic    o Fixed up header files for 2.0.X kernels
43*
44* Nov 08, 1999 Nenad Corbic o Combined all debug UDP calls into one function
45* o Removed the ARP support. This has to be done
46* in the next version.
47* o Only a Node can implement NO signalling.
48* Initialize DLCI during if_open() if NO
49* signalling.
50* o Took out IPX support, implement in next
51* version
52* Sep 29, 1999 Nenad Corbic o Added SMP support and changed the update
53* function to use timer interrupt.
54* o Fixed the CIR bug: Set the value of BC
55* to CIR when the CIR is enabled.
56* o Updated comments, statistics and tracing.
57* Jun 02, 1999 Gideon Hack o Updated for S514 support.
58* Sep 18, 1998 Jaspreet Singh o Updated for 2.2.X kernels.
59* Jul 31, 1998 Jaspreet Singh o Removed wpf_poll routine. The channel/DLCI
60* status is received through an event interrupt.
61* Jul 08, 1998 David Fong o Added inverse ARP support.
62* Mar 26, 1997 Jaspreet Singh o Returning return codes for failed UDP cmds.
63* Jan 28, 1997 Jaspreet Singh o Improved handling of inactive DLCIs.
64* Dec 30, 1997 Jaspreet Singh o Replaced dev_tint() with mark_bh(NET_BH)
65* Dec 16, 1997 Jaspreet Singh o Implemented Multiple IPX support.
66* Nov 26, 1997 Jaspreet Singh o Improved load sharing with multiple boards
67* o Added Cli() to protect enabling of interrupts
68* while polling is called.
69* Nov 24, 1997 Jaspreet Singh o Added counters to avoid enabling of interrupts
70* when they have been disabled by another
71* interface or routine (eg. wpf_poll).
72* Nov 06, 1997 Jaspreet Singh o Added INTR_TEST_MODE to avoid polling
73* routine disable interrupts during interrupt
74* testing.
75* Oct 20, 1997 Jaspreet Singh o Added hooks in for Router UP time.
76* Oct 16, 1997 Jaspreet Singh o The critical flag is used to maintain flow
77* control by avoiding RACE conditions. The
78* cli() and restore_flags() are taken out.
79* The fr_channel structure is appended for
80* Driver Statistics.
81* Oct 15, 1997 Farhan Thawar o updated if_send() and receive for IPX
82* Aug 29, 1997 Farhan Thawar o Removed most of the cli() and sti()
83* o Abstracted the UDP management stuff
84* o Now use tbusy and critical more intelligently
85* Jul 21, 1997 Jaspreet Singh o Can configure T391, T392, N391, N392 & N393
86* through router.conf.
  87*				o Protected calls to sdla_peek() by adding
88* save_flags(), cli() and restore_flags().
89* o Added error message for Inactive DLCIs in
90* fr_event() and update_chan_state().
91* o Fixed freeing up of buffers using kfree()
92* when packets are received.
93* Jul 07, 1997 Jaspreet Singh o Added configurable TTL for UDP packets
94* o Added ability to discard multicast and
95* broadcast source addressed packets
96* Jun 27, 1997 Jaspreet Singh o Added FT1 monitor capabilities
97* New case (0x44) statement in if_send routine
98* Added a global variable rCount to keep track
99* of FT1 status enabled on the board.
100* May 29, 1997 Jaspreet Singh o Fixed major Flow Control Problem
101* With multiple boards a problem was seen where
102* the second board always stopped transmitting
103* packet after running for a while. The code
104* got into a stage where the interrupts were
105* disabled and dev->tbusy was set to 1.
106* This caused the If_send() routine to get into
 107*				the if clause for set_bit(0,dev->tbusy)
108* forever.
109* The code got into this stage due to an
110* interrupt occurring within the if clause for
111* set_bit(0,dev->tbusy). Since an interrupt
 112*				disables further transmit interrupts and
113* makes dev->tbusy = 0, this effect was undone
114* by making dev->tbusy = 1 in the if clause.
115* The Fix checks to see if Transmit interrupts
116* are disabled then do not make dev->tbusy = 1
117* Introduced a global variable: int_occur and
118* added tx_int_enabled in the wan_device
119* structure.
120* May 21, 1997 Jaspreet Singh o Fixed UDP Management for multiple
121* boards.
122*
123* Apr 25, 1997 Farhan Thawar o added UDP Management stuff
124* o fixed bug in if_send() and tx_intr() to
125* sleep and wakeup all devices
126* Mar 11, 1997 Farhan Thawar Version 3.1.1
127* o fixed (+1) bug in fr508_rx_intr()
128* o changed if_send() to return 0 if
129* wandev.critical() is true
130* o free socket buffer in if_send() if
131* returning 0
132* o added tx_intr() routine
133* Jan 30, 1997 Gene Kozin Version 3.1.0
134* o implemented exec() entry point
135* o fixed a bug causing driver configured as
136* a FR switch to be stuck in WAN_
137* mode
138* Jan 02, 1997 Gene Kozin Initial version.
139*****************************************************************************/
140
141#include <linux/module.h>
142#include <linux/kernel.h> /* printk(), and other useful stuff */
143#include <linux/stddef.h> /* offsetof(), etc. */
144#include <linux/errno.h> /* return codes */
145#include <linux/string.h> /* inline memset(), etc. */
146#include <linux/slab.h> /* kmalloc(), kfree() */
147#include <linux/wanrouter.h> /* WAN router definitions */
148#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
149#include <linux/workqueue.h>
150#include <linux/if_arp.h> /* ARPHRD_* defines */
151#include <asm/byteorder.h> /* htons(), etc. */
152#include <asm/io.h> /* for inb(), outb(), etc. */
153#include <linux/time.h> /* for do_gettimeofday */
154#include <linux/in.h> /* sockaddr_in */
155#include <asm/errno.h>
156
157#include <linux/ip.h>
158#include <linux/if.h>
159
160#include <linux/if_wanpipe_common.h> /* Wanpipe Socket */
161#include <linux/if_wanpipe.h>
162
163#include <linux/sdla_fr.h> /* frame relay firmware API definitions */
164
165#include <asm/uaccess.h>
166#include <linux/inetdevice.h>
167#include <linux/netdevice.h>
168
169#include <net/route.h> /* Dynamic Route Creation */
170#include <linux/etherdevice.h> /* eth_type_trans() used for bridging */
171#include <linux/random.h>
172
173/****** Defines & Macros ****************************************************/
174
175#define MAX_CMD_RETRY 10 /* max number of firmware retries */
176
177#define FR_HEADER_LEN 8 /* max encapsulation header size */
178#define FR_CHANNEL_MTU 1500 /* unfragmented logical channel MTU */
179
180/* Q.922 frame types */
181#define Q922_UI 0x03 /* Unnumbered Info frame */
182#define Q922_XID 0xAF
183
184/* DLCI configured or not */
185#define DLCI_NOT_CONFIGURED 0x00
186#define DLCI_CONFIG_PENDING 0x01
187#define DLCI_CONFIGURED 0x02
188
189/* CIR enabled or not */
190#define CIR_ENABLED 0x00
191#define CIR_DISABLED 0x01
192
193#define FRAME_RELAY_API 1
194#define MAX_BH_BUFF 10
195
196/* For handle_IPXWAN() */
197#define CVHexToAscii(b) (((unsigned char)(b) > (unsigned char)9) ? ((unsigned char)'A' + ((unsigned char)(b) - (unsigned char)10)) : ((unsigned char)'0' + (unsigned char)(b)))
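/* For reference: CVHexToAscii(0x0A) yields 'A', CVHexToAscii(0x3) yields '3'. */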
198
199/****** Data Structures *****************************************************/
200
 201/* This is an extension of the 'struct device' we create for each network
202 * interface to keep the rest of channel-specific data.
203 */
204typedef struct fr_channel
205{
206 wanpipe_common_t common;
207 char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
208 unsigned dlci_configured ; /* check whether configured or not */
209 unsigned cir_status; /* check whether CIR enabled or not */
210 unsigned dlci; /* logical channel number */
211 unsigned cir; /* committed information rate */
212 unsigned bc; /* committed burst size */
213 unsigned be; /* excess burst size */
214 unsigned mc; /* multicast support on or off */
215 unsigned tx_int_status; /* Transmit Interrupt Status */
216 unsigned short pkt_length; /* Packet Length */
217 unsigned long router_start_time;/* Router start time in seconds */
218 unsigned long tick_counter; /* counter for transmit time out */
219 char dev_pending_devtint; /* interface pending dev_tint() */
220 void *dlci_int_interface; /* pointer to the DLCI Interface */
221 unsigned long IB_addr; /* physical address of Interface Byte */
222 unsigned long state_tick; /* time of the last state change */
223 unsigned char enable_IPX; /* Enable/Disable the use of IPX */
224 unsigned long network_number; /* Internal Network Number for IPX*/
225 sdla_t *card; /* -> owner */
226 unsigned route_flag; /* Add/Rem dest addr in route tables */
227 unsigned inarp; /* Inverse Arp Request status */
228 long inarp_ready; /* Ready to send requests */
229 int inarp_interval; /* Time between InArp Requests */
230 unsigned long inarp_tick; /* InArp jiffies tick counter */
231 long interface_down; /* Bring interface down on disconnect */
232 struct net_device_stats ifstats; /* interface statistics */
233 if_send_stat_t drvstats_if_send;
234 rx_intr_stat_t drvstats_rx_intr;
235 pipe_mgmt_stat_t drvstats_gen;
236 unsigned long router_up_time;
237
238 unsigned short transmit_length;
239 struct sk_buff *delay_skb;
240
241 bh_data_t *bh_head; /* Circular buffer for chdlc_bh */
242 unsigned long tq_working;
243 volatile int bh_write;
244 volatile int bh_read;
245 atomic_t bh_buff_used;
246
247 /* Polling task queue. Each interface
248 * has its own task queue, which is used
249 * to defer events from the interrupt */
250 struct work_struct fr_poll_work;
251 struct timer_list fr_arp_timer;
252
253 u32 ip_local;
254 u32 ip_remote;
255 long config_dlci;
256 long unconfig_dlci;
257
258 /* Whether this interface should be setup as a gateway.
259 * Used by dynamic route setup code */
260 u8 gateway;
261
262 /* True interface type */
263 u8 true_if_encoding;
264 u8 fr_header[FR_HEADER_LEN];
265 char fr_header_len;
266
267} fr_channel_t;
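/* (One fr_channel_t is allocated per logical interface/DLCI in new_if() and
 * attached to the network device through dev->priv.) */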
268
269/* Route Flag options */
270#define NO_ROUTE 0x00
271#define ADD_ROUTE 0x01
272#define ROUTE_ADDED 0x02
273#define REMOVE_ROUTE 0x03
274#define ARP_REQ 0x04
275
276/* inarp options */
277#define INARP_NONE 0x00
278#define INARP_REQUEST 0x01
279#define INARP_CONFIGURED 0x02
280
281/* reasons for enabling the timer interrupt on the adapter */
282#define TMR_INT_ENABLED_UDP 0x01
283#define TMR_INT_ENABLED_UPDATE 0x02
284#define TMR_INT_ENABLED_ARP 0x04
285#define TMR_INT_ENABLED_UPDATE_STATE 0x08
286#define TMR_INT_ENABLED_CONFIG 0x10
287#define TMR_INT_ENABLED_UNCONFIG 0x20
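/* (These flags are OR-ed into card->u.f.timer_int_enabled to ask the timer
 * interrupt handler to perform the corresponding piece of deferred work.) */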
288
289
290typedef struct dlci_status
291{
292 unsigned short dlci PACKED;
293 unsigned char state PACKED;
294} dlci_status_t;
295
296typedef struct dlci_IB_mapping
297{
298 unsigned short dlci PACKED;
299 unsigned long addr_value PACKED;
300} dlci_IB_mapping_t;
301
302/* This structure is used for DLCI list Tx interrupt mode. It is used to
 303   enable the interrupt bit and set the packet length for transmission
304 */
305typedef struct fr_dlci_interface
306{
307 unsigned char gen_interrupt PACKED;
308 unsigned short packet_length PACKED;
309 unsigned char reserved PACKED;
310} fr_dlci_interface_t;
311
312/* variable for keeping track of enabling/disabling FT1 monitor status */
313static int rCount = 0;
314
315extern void disable_irq(unsigned int);
316extern void enable_irq(unsigned int);
317
 318/* variable for keeping track of the number of interrupts generated during
 319 * the interrupt test routine
320 */
321static int Intr_test_counter;
322
323/****** Function Prototypes *************************************************/
324
325/* WAN link driver entry points. These are called by the WAN router module. */
326static int update(struct wan_device *wandev);
327static int new_if(struct wan_device *wandev, struct net_device *dev,
328 wanif_conf_t *conf);
329static int del_if(struct wan_device *wandev, struct net_device *dev);
330static void disable_comm (sdla_t *card);
331
332/* WANPIPE-specific entry points */
333static int wpf_exec(struct sdla *card, void *u_cmd, void *u_data);
334
335/* Network device interface */
336static int if_init(struct net_device *dev);
337static int if_open(struct net_device *dev);
338static int if_close(struct net_device *dev);
339
340static void if_tx_timeout(struct net_device *dev);
341
342static int if_rebuild_hdr (struct sk_buff *skb);
343
344static int if_send(struct sk_buff *skb, struct net_device *dev);
345static int chk_bcast_mcast_addr(sdla_t *card, struct net_device* dev,
346 struct sk_buff *skb);
347static struct net_device_stats *if_stats(struct net_device *dev);
348
349/* Interrupt handlers */
350static void fr_isr(sdla_t *card);
351static void rx_intr(sdla_t *card);
352static void tx_intr(sdla_t *card);
353static void timer_intr(sdla_t *card);
354static void spur_intr(sdla_t *card);
355
356/* Frame relay firmware interface functions */
357static int fr_read_version(sdla_t *card, char *str);
358static int fr_configure(sdla_t *card, fr_conf_t *conf);
359static int fr_dlci_configure(sdla_t *card, fr_dlc_conf_t *conf, unsigned dlci);
360static int fr_init_dlci (sdla_t *card, fr_channel_t *chan);
361static int fr_set_intr_mode (sdla_t *card, unsigned mode, unsigned mtu, unsigned short timeout);
362static int fr_comm_enable(sdla_t *card);
363static void fr_comm_disable(sdla_t *card);
364static int fr_get_err_stats(sdla_t *card);
365static int fr_get_stats(sdla_t *card);
366static int fr_add_dlci(sdla_t *card, int dlci);
367static int fr_activate_dlci(sdla_t *card, int dlci);
368static int fr_delete_dlci (sdla_t* card, int dlci);
369static int fr_issue_isf(sdla_t *card, int isf);
370static int fr_send(sdla_t *card, int dlci, unsigned char attr, int len,
371 void *buf);
372static int fr_send_data_header(sdla_t *card, int dlci, unsigned char attr, int len,
373 void *buf,unsigned char hdr_len);
374static unsigned int fr_send_hdr(sdla_t *card, int dlci, unsigned int offset);
375
376static int check_dlci_config (sdla_t *card, fr_channel_t *chan);
377static void initialize_rx_tx_buffers (sdla_t *card);
378
379
380/* Firmware asynchronous event handlers */
381static int fr_event(sdla_t *card, int event, fr_mbox_t *mbox);
382static int fr_modem_failure(sdla_t *card, fr_mbox_t *mbox);
383static int fr_dlci_change(sdla_t *card, fr_mbox_t *mbox);
384
385/* Miscellaneous functions */
386static int update_chan_state(struct net_device *dev);
387static void set_chan_state(struct net_device *dev, int state);
388static struct net_device *find_channel(sdla_t *card, unsigned dlci);
389static int is_tx_ready(sdla_t *card, fr_channel_t *chan);
390static unsigned int dec_to_uint(unsigned char *str, int len);
391static int reply_udp( unsigned char *data, unsigned int mbox_len );
392
393static int intr_test( sdla_t* card );
394static void init_chan_statistics( fr_channel_t* chan );
395static void init_global_statistics( sdla_t* card );
396static void read_DLCI_IB_mapping( sdla_t* card, fr_channel_t* chan );
397static int setup_for_delayed_transmit(struct net_device* dev,
398 struct sk_buff *skb);
399
400struct net_device *move_dev_to_next(sdla_t *card, struct net_device *dev);
401static int check_tx_status(sdla_t *card, struct net_device *dev);
402
403/* Frame Relay Socket API */
404static void trigger_fr_bh (fr_channel_t *);
405static void fr_bh(struct net_device *dev);
406static int fr_bh_cleanup(struct net_device *dev);
407static int bh_enqueue(struct net_device *dev, struct sk_buff *skb);
408
409static void trigger_fr_poll(struct net_device *dev);
410static void fr_poll(struct net_device *dev);
411//static void add_gateway(struct net_device *dev);
412
413static void trigger_unconfig_fr(struct net_device *dev);
414static void unconfig_fr (sdla_t *);
415
416static void trigger_config_fr (sdla_t *);
417static void config_fr (sdla_t *);
418
419
420/* Inverse ARP and Dynamic routing functions */
421int process_ARP(arphdr_1490_t *ArpPacket, sdla_t *card, struct net_device *dev);
422int is_arp(void *buf);
423int send_inarp_request(sdla_t *card, struct net_device *dev);
424
425static void trigger_fr_arp(struct net_device *dev);
426static void fr_arp (unsigned long data);
427
428
429/* Udp management functions */
430static int process_udp_mgmt_pkt(sdla_t *card);
431static int udp_pkt_type( struct sk_buff *skb, sdla_t *card );
432static int store_udp_mgmt_pkt(int udp_type, char udp_pkt_src, sdla_t* card,
433 struct sk_buff *skb, int dlci);
434
435/* IPX functions */
436static void switch_net_numbers(unsigned char *sendpacket,
437 unsigned long network_number, unsigned char incoming);
438
439static int handle_IPXWAN(unsigned char *sendpacket, char *devname,
440 unsigned char enable_IPX, unsigned long network_number);
441
442/* Lock Functions: SMP supported */
443void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags);
444void s508_s514_lock(sdla_t *card, unsigned long *smp_flags);
445
446unsigned short calc_checksum (char *, int);
447static int setup_fr_header(struct sk_buff** skb,
448 struct net_device* dev, char op_mode);
449
450
451/****** Public Functions ****************************************************/
452
453/*============================================================================
454 * Frame relay protocol initialization routine.
455 *
456 * This routine is called by the main WANPIPE module during setup. At this
 457 * point the adapter is completely initialized and the firmware is running.
458 * o read firmware version (to make sure it's alive)
459 * o configure adapter
460 * o initialize protocol-specific fields of the adapter data space.
461 *
462 * Return: 0 o.k.
463 * < 0 failure.
464 */
465int wpf_init(sdla_t *card, wandev_conf_t *conf)
466{
467
468 int err;
469 fr508_flags_t* flags;
470
471 union
472 {
473 char str[80];
474 fr_conf_t cfg;
475 } u;
476
477 fr_buf_info_t* buf_info;
478 int i;
479
480
481 printk(KERN_INFO "\n");
482
483 /* Verify configuration ID */
484 if (conf->config_id != WANCONFIG_FR) {
485
486 printk(KERN_INFO "%s: invalid configuration ID %u!\n",
487 card->devname, conf->config_id);
488 return -EINVAL;
489
490 }
491
492 /* Initialize protocol-specific fields of adapter data space */
493 switch (card->hw.fwid) {
494
495 case SFID_FR508:
496 card->mbox = (void*)(card->hw.dpmbase +
497 FR508_MBOX_OFFS);
498 card->flags = (void*)(card->hw.dpmbase +
499 FR508_FLAG_OFFS);
500 if(card->hw.type == SDLA_S514) {
501 card->mbox += FR_MB_VECTOR;
502 card->flags += FR_MB_VECTOR;
503 }
504 card->isr = &fr_isr;
505 break;
506
507 default:
508 return -EINVAL;
509 }
510
511 flags = card->flags;
512
513 /* Read firmware version. Note that when adapter initializes, it
514 * clears the mailbox, so it may appear that the first command was
515 * executed successfully when in fact it was merely erased. To work
516 * around this, we execute the first command twice.
517 */
518
519 if (fr_read_version(card, NULL) || fr_read_version(card, u.str))
520 return -EIO;
521
522 printk(KERN_INFO "%s: running frame relay firmware v%s\n",
523 card->devname, u.str);
524
525 /* Adjust configuration */
526 conf->mtu += FR_HEADER_LEN;
527 conf->mtu = (conf->mtu >= MIN_LGTH_FR_DATA_CFG) ?
528 min_t(unsigned int, conf->mtu, FR_MAX_NO_DATA_BYTES_IN_FRAME) :
529 FR_CHANNEL_MTU + FR_HEADER_LEN;
530
531 conf->bps = min_t(unsigned int, conf->bps, 2048000);
532
 533	/* Initialize the configuration structure sent to the board to zero */
534 memset(&u.cfg, 0, sizeof(u.cfg));
535
536 memset(card->u.f.dlci_to_dev_map, 0, sizeof(card->u.f.dlci_to_dev_map));
537
538 /* Configure adapter firmware */
539
540 u.cfg.mtu = conf->mtu;
541 u.cfg.kbps = conf->bps / 1000;
542
543 u.cfg.cir_fwd = u.cfg.cir_bwd = 16;
544 u.cfg.bc_fwd = u.cfg.bc_bwd = 16;
545
546 u.cfg.options = 0x0000;
547 printk(KERN_INFO "%s: Global CIR enabled by Default\n", card->devname);
548
549 switch (conf->u.fr.signalling) {
550
551 case WANOPT_FR_ANSI:
552 u.cfg.options = 0x0000;
553 break;
554
555 case WANOPT_FR_Q933:
556 u.cfg.options |= 0x0200;
557 break;
558
559 case WANOPT_FR_LMI:
560 u.cfg.options |= 0x0400;
561 break;
562
563 case WANOPT_NO:
564 u.cfg.options |= 0x0800;
565 break;
566 default:
567 printk(KERN_INFO "%s: Illegal Signalling option\n",
568 card->wandev.name);
569 return -EINVAL;
570 }
571
572
573 card->wandev.signalling = conf->u.fr.signalling;
574
575 if (conf->station == WANOPT_CPE) {
576
577
578 if (conf->u.fr.signalling == WANOPT_NO){
579 printk(KERN_INFO
580 "%s: ERROR - For NO signalling, station must be set to Node!",
581 card->devname);
582 return -EINVAL;
583 }
584
585 u.cfg.station = 0;
586 u.cfg.options |= 0x8000; /* auto config DLCI */
587 card->u.f.dlci_num = 0;
588
589 } else {
590
591 u.cfg.station = 1; /* switch emulation mode */
592
593 /* For switch emulation we have to create a list of dlci(s)
 594	 	 * that will be sent to the global SET_DLCI_CONFIGURATION
595 * command in fr_configure() routine.
596 */
597
598 card->u.f.dlci_num = min_t(unsigned int, max_t(unsigned int, conf->u.fr.dlci_num, 1), 100);
599
600 for ( i = 0; i < card->u.f.dlci_num; i++) {
601
602 card->u.f.node_dlci[i] = (unsigned short)
603 conf->u.fr.dlci[i] ? conf->u.fr.dlci[i] : 16;
604
605 }
606 }
607
608 if (conf->clocking == WANOPT_INTERNAL)
609 u.cfg.port |= 0x0001;
610
611 if (conf->interface == WANOPT_RS232)
612 u.cfg.port |= 0x0002;
613
614 if (conf->u.fr.t391)
615 u.cfg.t391 = min_t(unsigned int, conf->u.fr.t391, 30);
616 else
617 u.cfg.t391 = 5;
618
619 if (conf->u.fr.t392)
620 u.cfg.t392 = min_t(unsigned int, conf->u.fr.t392, 30);
621 else
622 u.cfg.t392 = 15;
623
624 if (conf->u.fr.n391)
625 u.cfg.n391 = min_t(unsigned int, conf->u.fr.n391, 255);
626 else
627 u.cfg.n391 = 2;
628
629 if (conf->u.fr.n392)
630 u.cfg.n392 = min_t(unsigned int, conf->u.fr.n392, 10);
631 else
632 u.cfg.n392 = 3;
633
634 if (conf->u.fr.n393)
635 u.cfg.n393 = min_t(unsigned int, conf->u.fr.n393, 10);
636 else
637 u.cfg.n393 = 4;
638
639 if (fr_configure(card, &u.cfg))
640 return -EIO;
641
642 if (card->hw.type == SDLA_S514) {
643
644 buf_info = (void*)(card->hw.dpmbase + FR_MB_VECTOR +
645 FR508_RXBC_OFFS);
646
647 card->rxmb = (void*)(buf_info->rse_next + card->hw.dpmbase);
648
649 card->u.f.rxmb_base =
650 (void*)(buf_info->rse_base + card->hw.dpmbase);
651
652 card->u.f.rxmb_last =
653 (void*)(buf_info->rse_base +
654 (buf_info->rse_num - 1) * sizeof(fr_rx_buf_ctl_t) +
655 card->hw.dpmbase);
656 }else{
657 buf_info = (void*)(card->hw.dpmbase + FR508_RXBC_OFFS);
658
659 card->rxmb = (void*)(buf_info->rse_next -
660 FR_MB_VECTOR + card->hw.dpmbase);
661
662 card->u.f.rxmb_base =
663 (void*)(buf_info->rse_base -
664 FR_MB_VECTOR + card->hw.dpmbase);
665
666 card->u.f.rxmb_last =
667 (void*)(buf_info->rse_base +
668 (buf_info->rse_num - 1) * sizeof(fr_rx_buf_ctl_t) -
669 FR_MB_VECTOR + card->hw.dpmbase);
670 }
671
672 card->u.f.rx_base = buf_info->buf_base;
673 card->u.f.rx_top = buf_info->buf_top;
674
675 card->u.f.tx_interrupts_pending = 0;
676
677 card->wandev.mtu = conf->mtu;
678 card->wandev.bps = conf->bps;
679 card->wandev.interface = conf->interface;
680 card->wandev.clocking = conf->clocking;
681 card->wandev.station = conf->station;
682 card->poll = NULL;
683 card->exec = &wpf_exec;
684 card->wandev.update = &update;
685 card->wandev.new_if = &new_if;
686 card->wandev.del_if = &del_if;
687 card->wandev.state = WAN_DISCONNECTED;
688 card->wandev.ttl = conf->ttl;
689 card->wandev.udp_port = conf->udp_port;
690 card->disable_comm = &disable_comm;
691 card->u.f.arp_dev = NULL;
692
 693	/* Initialize global statistics for a card */
694 init_global_statistics( card );
695
696 card->TracingEnabled = 0;
697
698 /* Interrupt Test */
699 Intr_test_counter = 0;
700 card->intr_mode = INTR_TEST_MODE;
701 err = intr_test( card );
702
703 printk(KERN_INFO "%s: End of Interrupt Test rc=0x%x count=%i\n",
704 card->devname,err,Intr_test_counter);
705
706 if (err || (Intr_test_counter < MAX_INTR_TEST_COUNTER)) {
707 printk(KERN_ERR "%s: Interrupt Test Failed, Counter: %i\n",
708 card->devname, Intr_test_counter);
709 printk(KERN_ERR "Please choose another interrupt\n");
710 err = -EIO;
711 return err;
712 }
713
714 printk(KERN_INFO "%s: Interrupt Test Passed, Counter: %i\n",
715 card->devname, Intr_test_counter);
716
717
718 /* Apr 28 2000. Nenad Corbic
 719	 * Enable communications here, not in if_open or new_if, since
720 * interfaces come down when the link is disconnected.
721 */
722
723 /* If you enable comms and then set ints, you get a Tx int as you
724 * perform the SET_INT_TRIGGERS command. So, we only set int
725 * triggers and then adjust the interrupt mask (to disable Tx ints)
726 * before enabling comms.
727 */
728 if (fr_set_intr_mode(card, (FR_INTR_RXRDY | FR_INTR_TXRDY |
729 FR_INTR_DLC | FR_INTR_TIMER | FR_INTR_TX_MULT_DLCIs) ,
730 card->wandev.mtu, 0)) {
731 return -EIO;
732 }
733
734 flags->imask &= ~(FR_INTR_TXRDY | FR_INTR_TIMER);
735
736 if (fr_comm_enable(card)) {
737 return -EIO;
738 }
739 wanpipe_set_state(card, WAN_CONNECTED);
740 spin_lock_init(&card->u.f.if_send_lock);
741
742 printk(KERN_INFO "\n");
743
744 return 0;
745}
746
747/******* WAN Device Driver Entry Points *************************************/
748
749/*============================================================================
750 * Update device status & statistics.
751 */
752static int update(struct wan_device* wandev)
753{
754 volatile sdla_t* card;
755 unsigned long timeout;
756 fr508_flags_t* flags;
757
758 /* sanity checks */
759 if ((wandev == NULL) || (wandev->private == NULL))
760 return -EFAULT;
761
762 if (wandev->state == WAN_UNCONFIGURED)
763 return -ENODEV;
764
765 card = wandev->private;
766 flags = card->flags;
767
768
769 card->u.f.update_comms_stats = 1;
770 card->u.f.timer_int_enabled |= TMR_INT_ENABLED_UPDATE;
771 flags->imask |= FR_INTR_TIMER;
772 timeout = jiffies;
773 for(;;) {
774 if(card->u.f.update_comms_stats == 0)
775 break;
776 if ((jiffies - timeout) > (1 * HZ)){
777 card->u.f.update_comms_stats = 0;
778 return -EAGAIN;
779 }
780 }
781
782 return 0;
783}
784
785/*============================================================================
786 * Create new logical channel.
787 * This routine is called by the router when ROUTER_IFNEW IOCTL is being
788 * handled.
789 * o parse media- and hardware-specific configuration
790 * o make sure that a new channel can be created
791 * o allocate resources, if necessary
 792 * o prepare network device structure for registration.
793 *
794 * Return: 0 o.k.
795 * < 0 failure (channel will not be created)
796 */
797static int new_if(struct wan_device* wandev, struct net_device* dev,
798 wanif_conf_t* conf)
799{
800 sdla_t* card = wandev->private;
801 fr_channel_t* chan;
802 int dlci = 0;
803 int err = 0;
804
805
806 if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)) {
807
808 printk(KERN_INFO "%s: Invalid interface name!\n",
809 card->devname);
810 return -EINVAL;
811 }
812
813 /* allocate and initialize private data */
814 chan = kmalloc(sizeof(fr_channel_t), GFP_KERNEL);
815
816 if (chan == NULL)
817 return -ENOMEM;
818
819 memset(chan, 0, sizeof(fr_channel_t));
820 strcpy(chan->name, conf->name);
821 chan->card = card;
822
823 /* verify media address */
824 if (is_digit(conf->addr[0])) {
825
826 dlci = dec_to_uint(conf->addr, 0);
827
828 if (dlci && (dlci <= HIGHEST_VALID_DLCI)) {
829
830 chan->dlci = dlci;
831
832 } else {
833
834 printk(KERN_ERR
835 "%s: Invalid DLCI %u on interface %s!\n",
836 wandev->name, dlci, chan->name);
837 err = -EINVAL;
838 }
839
840 } else {
841 printk(KERN_ERR
842 "%s: Invalid media address on interface %s!\n",
843 wandev->name, chan->name);
844 err = -EINVAL;
845 }
846
847 if ((chan->true_if_encoding = conf->true_if_encoding) == WANOPT_YES){
848 printk(KERN_INFO
849 "%s: Enabling, true interface type encoding.\n",
850 card->devname);
851 }
852
853
854
855 /* Setup wanpipe as a router (WANPIPE) even if it is
856 * a bridged DLCI, or as an API
857 */
858 if (strcmp(conf->usedby, "WANPIPE") == 0 ||
859 strcmp(conf->usedby, "BRIDGE") == 0 ||
860 strcmp(conf->usedby, "BRIDGE_N") == 0){
861
862 if(strcmp(conf->usedby, "WANPIPE") == 0){
863 chan->common.usedby = WANPIPE;
864
865 printk(KERN_INFO "%s: Running in WANPIPE mode.\n",
866 card->devname);
867
868 }else if(strcmp(conf->usedby, "BRIDGE") == 0){
869
870 chan->common.usedby = BRIDGE;
871
872 printk(KERN_INFO "%s: Running in WANPIPE (BRIDGE) mode.\n",
873 card->devname);
874 }else if( strcmp(conf->usedby, "BRIDGE_N") == 0 ){
875
876 chan->common.usedby = BRIDGE_NODE;
877
878 printk(KERN_INFO "%s: Running in WANPIPE (BRIDGE_NODE) mode.\n",
879 card->devname);
880 }
881
882 if (!err){
883 /* Dynamic interface configuration option.
 884			/* On disconnect, if the option is selected,
885 * the interface will be brought down */
886 if (conf->if_down == WANOPT_YES){
887 set_bit(DYN_OPT_ON,&chan->interface_down);
888 printk(KERN_INFO
889 "%s: Dynamic interface configuration enabled.\n",
890 card->devname);
891 }
892 }
893
894 } else if(strcmp(conf->usedby, "API") == 0){
895
896 chan->common.usedby = API;
897 printk(KERN_INFO "%s: Running in API mode.\n",
898 wandev->name);
899 }
900
901 if (err) {
902
903 kfree(chan);
904 return err;
905 }
906
 907	/* place cir, be, bc and other channel-specific information into the
908 * chan structure
909 */
910 if (conf->cir) {
911
912 chan->cir = max_t(unsigned int, 1,
913 min_t(unsigned int, conf->cir, 512));
914 chan->cir_status = CIR_ENABLED;
915
916
917 /* If CIR is enabled, force BC to equal CIR
 918		 * this solves a number of potential problems if CIR is
919 * set and BC is not
920 */
921 chan->bc = chan->cir;
922
923 if (conf->be){
924 chan->be = max_t(unsigned int,
925 0, min_t(unsigned int, conf->be, 511));
926 }else{
927 conf->be = 0;
928 }
929
930 printk (KERN_INFO "%s: CIR enabled for DLCI %i \n",
931 wandev->name,chan->dlci);
932 printk (KERN_INFO "%s: CIR = %i ; BC = %i ; BE = %i\n",
933 wandev->name,chan->cir,chan->bc,chan->be);
934
935
936 }else{
937 chan->cir_status = CIR_DISABLED;
938 printk (KERN_INFO "%s: CIR disabled for DLCI %i\n",
939 wandev->name,chan->dlci);
940 }
941
942 chan->mc = conf->mc;
943
944 if (conf->inarp == WANOPT_YES){
945 printk(KERN_INFO "%s: Inverse ARP Support Enabled\n",card->devname);
946 chan->inarp = conf->inarp ? INARP_REQUEST : INARP_NONE;
947 chan->inarp_interval = conf->inarp_interval ? conf->inarp_interval : 10;
948 }else{
949 printk(KERN_INFO "%s: Inverse ARP Support Disabled\n",card->devname);
950 chan->inarp = INARP_NONE;
951 chan->inarp_interval = 10;
952 }
953
954
955 chan->dlci_configured = DLCI_NOT_CONFIGURED;
956
957
958 /*FIXME: IPX disabled in this WANPIPE version */
959 if (conf->enable_IPX == WANOPT_YES){
960 printk(KERN_INFO "%s: ERROR - This version of WANPIPE doesn't support IPX\n",
961 card->devname);
962 kfree(chan);
963 return -EINVAL;
964 }else{
965 chan->enable_IPX = WANOPT_NO;
966 }
967
968 if (conf->network_number){
969 chan->network_number = conf->network_number;
970 }else{
971 chan->network_number = 0xDEADBEEF;
972 }
973
974 chan->route_flag = NO_ROUTE;
975
976 init_chan_statistics(chan);
977
978 chan->transmit_length = 0;
979
980 /* prepare network device data space for registration */
981 strcpy(dev->name,chan->name);
982
983 dev->init = &if_init;
984 dev->priv = chan;
985
986 /* Initialize FR Polling Task Queue
987 * We need a poll routine for each network
988 * interface.
989 */
990 INIT_WORK(&chan->fr_poll_work, (void *)fr_poll, dev);
991
992 init_timer(&chan->fr_arp_timer);
993 chan->fr_arp_timer.data=(unsigned long)dev;
994 chan->fr_arp_timer.function = fr_arp;
995
996 wandev->new_if_cnt++;
997
 998	/* Tells us whether this interface is a
999 * gateway or not */
1000 if ((chan->gateway = conf->gateway) == WANOPT_YES){
1001 printk(KERN_INFO "%s: Interface %s is set as a gateway.\n",
1002 card->devname,dev->name);
1003 }
1004
1005 /* M. Grant Patch Apr 28 2000
1006 * Disallow duplicate dlci configurations. */
1007 if (card->u.f.dlci_to_dev_map[chan->dlci] != NULL) {
1008 kfree(chan);
1009 return -EBUSY;
1010 }
1011
1012 /* Configure this dlci at a later date, when
1013 * the interface comes up. i.e. when if_open()
1014 * executes */
1015 set_bit(0,&chan->config_dlci);
1016
1017 printk(KERN_INFO "\n");
1018
1019 return 0;
1020}
1021
1022/*============================================================================
1023 * Delete logical channel.
1024 */
1025static int del_if(struct wan_device* wandev, struct net_device* dev)
1026{
1027 fr_channel_t* chan = dev->priv;
1028 unsigned long smp_flags=0;
1029
1030 /* This interface is dead, make sure the
1031 * ARP timer is stopped */
1032 del_timer(&chan->fr_arp_timer);
1033
1034 /* If we are a NODE, we must unconfigure this DLCI
1035 * Trigger an unconfigure command that will
1036 * be executed in timer interrupt. We must wait
1037 * for the command to complete. */
1038 trigger_unconfig_fr(dev);
1039
1040 lock_adapter_irq(&wandev->lock, &smp_flags);
1041 wandev->new_if_cnt--;
1042 unlock_adapter_irq(&wandev->lock, &smp_flags);
1043
1044 return 0;
1045}
1046
1047
1048/*=====================================================================
1049 * disable_comm
1050 *
1051 * Description:
1052 * Disable communications.
1053 * This code runs in shutdown (sdlamain.c)
1054 * under critical flag. Therefore it is not
1055 * necessary to set a critical flag here
1056 *
1057 * Usage:
1058 *	Communications are disabled only on a card
1059 * shutdown.
1060 */
1061
1062static void disable_comm (sdla_t *card)
1063{
1064 printk(KERN_INFO "%s: Disabling Communications!\n",
1065 card->devname);
1066 fr_comm_disable(card);
1067}
1068
1069/****** WANPIPE-specific entry points ***************************************/
1070
1071/*============================================================================
1072 * Execute adapter interface command.
1073 */
1074static int wpf_exec (struct sdla* card, void* u_cmd, void* u_data)
1075{
1076 fr_mbox_t* mbox = card->mbox;
1077 int retry = MAX_CMD_RETRY;
1078 int err, len;
1079 fr_cmd_t cmd;
1080
1081 if(copy_from_user((void*)&cmd, u_cmd, sizeof(cmd)))
1082 return -EFAULT;
1083
1084 /* execute command */
1085 do
1086 {
1087 memcpy(&mbox->cmd, &cmd, sizeof(cmd));
1088
1089 if (cmd.length){
1090 if( copy_from_user((void*)&mbox->data, u_data, cmd.length))
1091 return -EFAULT;
1092 }
1093
1094		if (sdla_exec(mbox))
1095			err = mbox->cmd.result;
1096		else
1097			return -EIO;
1098
1099 } while (err && retry-- && fr_event(card, err, mbox));
1100
1101 /* return result */
1102 if (copy_to_user(u_cmd, (void*)&mbox->cmd, sizeof(fr_cmd_t)))
1103 return -EFAULT;
1104
1105 len = mbox->cmd.length;
1106
1107	if (len && u_data && copy_to_user(u_data, (void*)&mbox->data, len))
1108 return -EFAULT;
1109 return 0;
1110}
1111
1112/****** Network Device Interface ********************************************/
1113
1114/*============================================================================
1115 * Initialize Linux network interface.
1116 *
1117 * This routine is called only once for each interface, during Linux network
1118 * interface registration. Returning anything but zero will fail interface
1119 * registration.
1120 */
1121static int if_init(struct net_device* dev)
1122{
1123 fr_channel_t* chan = dev->priv;
1124 sdla_t* card = chan->card;
1125 struct wan_device* wandev = &card->wandev;
1126
1127 /* Initialize device driver entry points */
1128 dev->open = &if_open;
1129 dev->stop = &if_close;
1130 dev->hard_header = NULL;
1131 dev->rebuild_header = &if_rebuild_hdr;
1132 dev->hard_start_xmit = &if_send;
1133 dev->get_stats = &if_stats;
1134 dev->tx_timeout = &if_tx_timeout;
1135 dev->watchdog_timeo = TX_TIMEOUT;
1136
1137 if (chan->common.usedby == WANPIPE || chan->common.usedby == API){
1138
1139 /* Initialize media-specific parameters */
1140 if (chan->true_if_encoding){
1141 dev->type = ARPHRD_DLCI; /* This breaks tcpdump */
1142 }else{
1143 dev->type = ARPHRD_PPP; /* ARP h/w type */
1144 }
1145
1146 dev->flags |= IFF_POINTOPOINT;
1147 dev->flags |= IFF_NOARP;
1148
1149 /* Enable Multicast addressing */
1150 if (chan->mc == WANOPT_YES){
1151 dev->flags |= IFF_MULTICAST;
1152 }
1153
1154 dev->mtu = wandev->mtu - FR_HEADER_LEN;
1155 /* For an API, the maximum number of bytes that the stack will pass
1156 to the driver is (dev->mtu + dev->hard_header_len). So, adjust the
1157 mtu so that a frame of maximum size can be transmitted by the API.
1158 */
1159 if(chan->common.usedby == API) {
1160 dev->mtu += (sizeof(api_tx_hdr_t) - FR_HEADER_LEN);
1161 }
1162
1163 dev->hard_header_len = FR_HEADER_LEN;/* media header length */
1164 dev->addr_len = 2; /* hardware address length */
1165 *(unsigned short*)dev->dev_addr = htons(chan->dlci);
1166
1167 /* Set transmit buffer queue length */
1168 dev->tx_queue_len = 100;
1169
1170 }else{
1171
1172 /* Setup the interface for Bridging */
1173 int hw_addr=0;
1174 ether_setup(dev);
1175
1176 /* Use a random number to generate the MAC address */
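		/* (The fixed 0xFE,0xFC prefix marks this as a locally
		 * administered unicast address; only the low four bytes
		 * are randomized.) */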
1177 memcpy(dev->dev_addr, "\xFE\xFC\x00\x00\x00\x00", 6);
1178 get_random_bytes(&hw_addr, sizeof(hw_addr));
1179 *(int *)(dev->dev_addr + 2) += hw_addr;
1180 }
1181
1182 /* Initialize hardware parameters (just for reference) */
1183 dev->irq = wandev->irq;
1184 dev->dma = wandev->dma;
1185 dev->base_addr = wandev->ioport;
1186 dev->mem_start = wandev->maddr;
1187 dev->mem_end = wandev->maddr + wandev->msize - 1;
1188 SET_MODULE_OWNER(dev);
1189
1190 return 0;
1191}
1192
1193/*============================================================================
1194 * Open network interface.
1195 * o if this is the first open, then enable communications and interrupts.
1196 * o prevent module from unloading by incrementing use count
1197 *
1198 * Return 0 if O.k. or errno.
1199 */
1200static int if_open(struct net_device* dev)
1201{
1202 fr_channel_t* chan = dev->priv;
1203 sdla_t* card = chan->card;
1204 int err = 0;
1205 struct timeval tv;
1206
1207 if (netif_running(dev))
1208 return -EBUSY;
1209
1210 /* Initialize the task queue */
1211 chan->tq_working=0;
1212
1213 INIT_WORK(&chan->common.wanpipe_work, (void *)fr_bh, dev);
1214
1215 /* Allocate and initialize BH circular buffer */
1216 chan->bh_head = kmalloc((sizeof(bh_data_t)*MAX_BH_BUFF),GFP_ATOMIC);
1217 memset(chan->bh_head,0,(sizeof(bh_data_t)*MAX_BH_BUFF));
1218 atomic_set(&chan->bh_buff_used, 0);
1219
1220 netif_start_queue(dev);
1221
1222 wanpipe_open(card);
1223 do_gettimeofday( &tv );
1224 chan->router_start_time = tv.tv_sec;
1225
1226 if (test_bit(0,&chan->config_dlci)){
1227 trigger_config_fr (card);
1228 }else if (chan->inarp == INARP_REQUEST){
1229 trigger_fr_arp(dev);
1230 }
1231
1232 return err;
1233}
1234
1235/*============================================================================
1236 * Close network interface.
1237 * o if this is the last open, then disable communications and interrupts.
1238 * o reset flags.
1239 */
1240static int if_close(struct net_device* dev)
1241{
1242 fr_channel_t* chan = dev->priv;
1243 sdla_t* card = chan->card;
1244
1245 if (chan->inarp == INARP_CONFIGURED) {
1246 chan->inarp = INARP_REQUEST;
1247 }
1248
1249 netif_stop_queue(dev);
1250 wanpipe_close(card);
1251
1252 return 0;
1253}
1254
1255/*============================================================================
1256 * Re-build media header.
1257 *
1258 * Return: 1 physical address resolved.
1259 * 0 physical address not resolved
1260 */
1261static int if_rebuild_hdr (struct sk_buff* skb)
1262{
1263 struct net_device *dev = skb->dev;
1264 fr_channel_t* chan = dev->priv;
1265 sdla_t* card = chan->card;
1266
1267 printk(KERN_INFO "%s: rebuild_header() called for interface %s!\n",
1268 card->devname, dev->name);
1269 return 1;
1270}
1271
1272/*============================================================================
1273 * Handle transmit timeout event from netif watchdog
1274 */
1275static void if_tx_timeout(struct net_device *dev)
1276{
1277 fr_channel_t* chan = dev->priv;
1278 sdla_t *card = chan->card;
1279
1280 /* If our device stays busy for at least 5 seconds then we will
1281 * kick start the device by making dev->tbusy = 0. We expect
1282 * that our device never stays busy more than 5 seconds. So this
1283 * is only used as a last resort.
1284 */
1285
1286 chan->drvstats_if_send.if_send_tbusy++;
1287 ++chan->ifstats.collisions;
1288
1289 printk (KERN_INFO "%s: Transmit timed out on %s\n",
1290 card->devname, dev->name);
1291 chan->drvstats_if_send.if_send_tbusy_timeout++;
1292 netif_wake_queue (dev);
1293
1294}
1295
1296
1297/*============================================================================
1298 * Send a packet on a network interface.
1299 * o set tbusy flag (marks start of the transmission) to block a timer-based
1300 * transmit from overlapping.
1301 * o set critical flag when accessing board.
1302 * o check link state. If link is not up, then drop the packet.
1303 * o check channel status. If it's down then initiate a call.
1304 * o pass a packet to corresponding WAN device.
1305 * o free socket buffer
1306 *
1307 * Return: 0 complete (socket buffer must be freed)
1308 * non-0 packet may be re-transmitted (tbusy must be set)
1309 *
1310 * Notes:
1311 * 1. This routine is called either by the protocol stack or by the "net
1312 * bottom half" (with interrupts enabled).
1313 *
1314 * 2. Using netif_start_queue() and netif_stop_queue()
1315 * will inhibit further transmit requests from the protocol stack
1316 * and can be used for flow control with protocol layer.
1317 */
1318static int if_send(struct sk_buff* skb, struct net_device* dev)
1319{
1320 fr_channel_t* chan = dev->priv;
1321 sdla_t* card = chan->card;
1322 int err;
1323 unsigned char *sendpacket;
1324 fr508_flags_t* adptr_flags = card->flags;
1325 int udp_type;
1326 long delay_tx_queued = 0;
1327 unsigned long smp_flags=0;
1328 unsigned char attr = 0;
1329
1330 chan->drvstats_if_send.if_send_entry++;
1331
1332 netif_stop_queue(dev);
1333
1334 if (skb == NULL) {
1335		/* if we get here, some higher layer thinks we've missed a
1336 * tx-done interrupt.
1337 */
1338 printk(KERN_INFO "%s: interface %s got kicked!\n",
1339 card->devname, dev->name);
1340 chan->drvstats_if_send.if_send_skb_null ++;
1341
1342 netif_wake_queue(dev);
1343 return 0;
1344 }
1345
1346 /* If a peripheral task is running just drop packets */
1347 if (test_bit(PERI_CRIT, &card->wandev.critical)){
1348
1349 printk(KERN_INFO "%s: Critical in if_send(): Peripheral running!\n",
1350 card->devname);
1351
1352 dev_kfree_skb_any(skb);
1353 netif_start_queue(dev);
1354 return 0;
1355 }
1356
1357 /* We must set the 'tbusy' flag if we already have a packet queued for
1358 transmission in the transmit interrupt handler. However, we must
1359 ensure that the transmit interrupt does not reset the 'tbusy' flag
1360 just before we set it, as this will result in a "transmit timeout".
1361 */
1362 set_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical);
1363 if(chan->transmit_length) {
1364 netif_stop_queue(dev);
1365 chan->tick_counter = jiffies;
1366 clear_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical);
1367 return 1;
1368 }
1369 clear_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical);
1370
1371	/* Move the if_header() code to here. By inserting the frame
1372	 * relay header in if_header() we would break
1373 * tcpdump and other packet sniffers */
1374 chan->fr_header_len = setup_fr_header(&skb,dev,chan->common.usedby);
1375 if (chan->fr_header_len < 0 ){
1376 ++chan->ifstats.tx_dropped;
1377 ++card->wandev.stats.tx_dropped;
1378
1379 dev_kfree_skb_any(skb);
1380 netif_start_queue(dev);
1381 return 0;
1382 }
1383
1384 sendpacket = skb->data;
1385
1386 udp_type = udp_pkt_type(skb, card);
1387
1388 if(udp_type != UDP_INVALID_TYPE) {
1389 if(store_udp_mgmt_pkt(udp_type, UDP_PKT_FRM_STACK, card, skb,
1390 chan->dlci)) {
1391 adptr_flags->imask |= FR_INTR_TIMER;
1392 if (udp_type == UDP_FPIPE_TYPE){
1393 chan->drvstats_if_send.
1394 if_send_PIPE_request ++;
1395 }
1396 }
1397 netif_start_queue(dev);
1398 return 0;
1399 }
1400
1401 //FIXME: can we do better than sendpacket[2]?
1402 if ((chan->common.usedby == WANPIPE) && (sendpacket[2] == 0x45)) {
1403
1404 /* check to see if the source IP address is a broadcast or */
1405 /* multicast IP address */
1406 if(chk_bcast_mcast_addr(card, dev, skb)){
1407 ++chan->ifstats.tx_dropped;
1408 ++card->wandev.stats.tx_dropped;
1409 dev_kfree_skb_any(skb);
1410 netif_start_queue(dev);
1411 return 0;
1412 }
1413 }
1414
1415
1416 /* Lock the S514/S508 card: SMP Supported */
1417 s508_s514_lock(card,&smp_flags);
1418
1419 if (test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
1420
1421 chan->drvstats_if_send.if_send_critical_non_ISR ++;
1422 chan->ifstats.tx_dropped ++;
1423 printk(KERN_INFO "%s Critical in IF_SEND: if_send() already running!\n",
1424 card->devname);
1425 goto if_send_start_and_exit;
1426 }
1427
1428 /* API packet check: minimum packet size must be greater than
1429	 * the 16-byte API header */
1430 if((chan->common.usedby == API) && (skb->len <= sizeof(api_tx_hdr_t))) {
1431 ++chan->ifstats.tx_dropped;
1432 ++card->wandev.stats.tx_dropped;
1433
1434
1435 goto if_send_start_and_exit;
1436
1437 }else{
1438 /* During API transmission, get rid of the API header */
1439 if (chan->common.usedby == API) {
1440 api_tx_hdr_t* api_tx_hdr;
1441 api_tx_hdr = (api_tx_hdr_t*)&skb->data[0x00];
1442 attr = api_tx_hdr->attr;
1443 skb_pull(skb,sizeof(api_tx_hdr_t));
1444 }
1445 }
1446
1447 if (card->wandev.state != WAN_CONNECTED) {
1448 chan->drvstats_if_send.if_send_wan_disconnected ++;
1449 ++chan->ifstats.tx_dropped;
1450 ++card->wandev.stats.tx_dropped;
1451
1452 } else if (chan->common.state != WAN_CONNECTED) {
1453 chan->drvstats_if_send.if_send_dlci_disconnected ++;
1454
1455 /* Update the DLCI state in timer interrupt */
1456 card->u.f.timer_int_enabled |= TMR_INT_ENABLED_UPDATE_STATE;
1457 adptr_flags->imask |= FR_INTR_TIMER;
1458
1459 ++chan->ifstats.tx_dropped;
1460 ++card->wandev.stats.tx_dropped;
1461
1462 } else if (!is_tx_ready(card, chan)) {
1463 /* No tx buffers available, store for delayed transmit */
1464 if (!setup_for_delayed_transmit(dev, skb)){
1465 set_bit(1,&delay_tx_queued);
1466 }
1467 chan->drvstats_if_send.if_send_no_bfrs++;
1468
1469 } else if (!skb->protocol) {
1470		/* No protocol set, drop the packet */
1471 chan->drvstats_if_send.if_send_protocol_error ++;
1472 ++card->wandev.stats.tx_errors;
1473
1474 } else if (test_bit(ARP_CRIT,&card->wandev.critical)){
1475 /* We are trying to send an ARP Packet, block IP data until
1476 * ARP is sent */
1477 ++chan->ifstats.tx_dropped;
1478 ++card->wandev.stats.tx_dropped;
1479
1480 } else {
1481 //FIXME: IPX is not implemented in this version of Frame Relay ?
1482 if((chan->common.usedby == WANPIPE) &&
1483 sendpacket[1] == 0x00 &&
1484 sendpacket[2] == 0x80 &&
1485 sendpacket[6] == 0x81 &&
1486 sendpacket[7] == 0x37) {
1487
1488 if( chan->enable_IPX ) {
1489 switch_net_numbers(sendpacket,
1490 chan->network_number, 0);
1491 } else {
1492 //FIXME: Take this out when IPX is fixed
1493 printk(KERN_INFO
1494 "%s: WARNING: Unsupported IPX data in send, packet dropped\n",
1495 card->devname);
1496 }
1497
1498 }else{
1499 err = fr_send_data_header(card, chan->dlci, attr, skb->len, skb->data, chan->fr_header_len);
1500 if (err) {
1501 switch(err) {
1502 case FRRES_CIR_OVERFLOW:
1503 case FRRES_BUFFER_OVERFLOW:
1504 if (!setup_for_delayed_transmit(dev, skb)){
1505 set_bit(1,&delay_tx_queued);
1506 }
1507 chan->drvstats_if_send.
1508 if_send_adptr_bfrs_full ++;
1509 break;
1510
1511 case FRRES_TOO_LONG:
1512 if (net_ratelimit()){
1513 printk(KERN_INFO
1514 "%s: Error: Frame too long, transmission failed %i\n",
1515 card->devname, (unsigned int)skb->len);
1516 }
1517 /* Drop down to default */
1518 default:
1519 chan->drvstats_if_send.
1520 if_send_dlci_disconnected ++;
1521 ++chan->ifstats.tx_dropped;
1522 ++card->wandev.stats.tx_dropped;
1523 break;
1524 }
1525 } else {
1526 chan->drvstats_if_send.
1527 if_send_bfr_passed_to_adptr++;
1528 ++chan->ifstats.tx_packets;
1529 ++card->wandev.stats.tx_packets;
1530
1531 chan->ifstats.tx_bytes += skb->len;
1532 card->wandev.stats.tx_bytes += skb->len;
1533 dev->trans_start = jiffies;
1534 }
1535 }
1536 }
1537
1538if_send_start_and_exit:
1539
1540 netif_start_queue(dev);
1541
1542 /* If we queued the packet for transmission, we must not
1543	 * deallocate it. The packet is unlinked from the IP stack,
1544 * not copied. Therefore, we must keep the original packet */
1545 if (!test_bit(1,&delay_tx_queued)) {
1546 dev_kfree_skb_any(skb);
1547 }else{
1548 adptr_flags->imask |= FR_INTR_TXRDY;
1549 card->u.f.tx_interrupts_pending ++;
1550 }
1551
1552 clear_bit(SEND_CRIT, (void*)&card->wandev.critical);
1553
1554 s508_s514_unlock(card,&smp_flags);
1555
1556 return 0;
1557}
1558
1559
1560
1561/*============================================================================
1562 * Setup so that a frame can be transmitted on the occurrence of a transmit
1563 * interrupt.
1564 */
1565static int setup_for_delayed_transmit(struct net_device* dev,
1566 struct sk_buff *skb)
1567{
1568 fr_channel_t* chan = dev->priv;
1569 sdla_t* card = chan->card;
1570 fr_dlci_interface_t* dlci_interface;
1571 int len = skb->len;
1572
1573 /* Check that the dlci is properly configured,
1574 * before using tx interrupt */
1575 if (!chan->dlci_int_interface){
1576 if (net_ratelimit()){
1577 printk(KERN_INFO
1578 "%s: ERROR on DLCI %i: Not configured properly !\n",
1579 card->devname, chan->dlci);
1580 printk(KERN_INFO "%s: Please contact Sangoma Technologies\n",
1581 card->devname);
1582 }
1583 return 1;
1584 }
1585
1586 dlci_interface = chan->dlci_int_interface;
1587
1588 if(chan->transmit_length) {
1589 printk(KERN_INFO "%s: Big mess in setup_for_del...\n",
1590 card->devname);
1591 return 1;
1592 }
1593
1594 if(len > FR_MAX_NO_DATA_BYTES_IN_FRAME) {
1595		/* FIXME: increment some statistic */
1596 return 1;
1597 }
1598
1599 skb_unlink(skb);
1600
1601 chan->transmit_length = len;
1602 chan->delay_skb = skb;
1603
1604 dlci_interface->gen_interrupt |= FR_INTR_TXRDY;
1605 dlci_interface->packet_length = len;
1606
1607 /* Turn on TX interrupt at the end of if_send */
1608 return 0;
1609}
1610
1611
1612/*============================================================================
1613 * Check to see if the packet to be transmitted contains a broadcast or
1614 * multicast source IP address.
1615 * Return 0 if not broadcast/multicast address, otherwise return 1.
1616 */
1617
1618static int chk_bcast_mcast_addr(sdla_t *card, struct net_device* dev,
1619 struct sk_buff *skb)
1620{
1621 u32 src_ip_addr;
1622 u32 broadcast_ip_addr = 0;
1623 struct in_device *in_dev;
1624 fr_channel_t* chan = dev->priv;
1625
1626 /* read the IP source address from the outgoing packet */
1627 src_ip_addr = *(u32 *)(skb->data + 14);
1628
1629 /* read the IP broadcast address for the device */
1630 in_dev = dev->ip_ptr;
1631 if(in_dev != NULL) {
1632 struct in_ifaddr *ifa= in_dev->ifa_list;
1633 if(ifa != NULL)
1634 broadcast_ip_addr = ifa->ifa_broadcast;
1635 else
1636 return 0;
1637 }
1638
1639 /* check if the IP Source Address is a Broadcast address */
1640 if((dev->flags & IFF_BROADCAST) && (src_ip_addr == broadcast_ip_addr)) {
1641 printk(KERN_INFO
1642 "%s: Broadcast Source Address silently discarded\n",
1643 card->devname);
1644 return 1;
1645 }
1646
1647 /* check if the IP Source Address is a Multicast address */
1648 if((chan->mc == WANOPT_NO) && (ntohl(src_ip_addr) >= 0xE0000001) &&
1649 (ntohl(src_ip_addr) <= 0xFFFFFFFE)) {
1650 printk(KERN_INFO
1651 "%s: Multicast Source Address silently discarded\n",
1652 card->devname);
1653 return 1;
1654 }
1655
1656 return 0;
1657}
1658
1659/*============================================================================
1660 * Reply to UDP Management system.
1661 * Return nothing.
1662 */
1663static int reply_udp( unsigned char *data, unsigned int mbox_len )
1664{
1665 unsigned short len, udp_length, temp, ip_length;
1666 unsigned long ip_temp;
1667 int even_bound = 0;
1668
1669
1670 fr_udp_pkt_t *fr_udp_pkt = (fr_udp_pkt_t *)data;
1671
1672 /* Set length of packet */
1673 len = //sizeof(fr_encap_hdr_t)+
1674 sizeof(ip_pkt_t)+
1675 sizeof(udp_pkt_t)+
1676 sizeof(wp_mgmt_t)+
1677 sizeof(cblock_t)+
1678 mbox_len;
1679
1680
1681 /* fill in UDP reply */
1682 fr_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
1683
1684 /* fill in UDP length */
1685 udp_length = sizeof(udp_pkt_t)+
1686 sizeof(wp_mgmt_t)+
1687 sizeof(cblock_t)+
1688 mbox_len;
1689
1690
1691 /* put it on an even boundary */
1692 if ( udp_length & 0x0001 ) {
1693 udp_length += 1;
1694 len += 1;
1695 even_bound = 1;
1696 }
1697
1698 temp = (udp_length<<8)|(udp_length>>8);
1699 fr_udp_pkt->udp_pkt.udp_length = temp;
1700
1701 /* swap UDP ports */
1702 temp = fr_udp_pkt->udp_pkt.udp_src_port;
1703 fr_udp_pkt->udp_pkt.udp_src_port =
1704 fr_udp_pkt->udp_pkt.udp_dst_port;
1705 fr_udp_pkt->udp_pkt.udp_dst_port = temp;
1706
1707
1708
1709 /* add UDP pseudo header */
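	/* The pseudo-header fields (protocol 0x11 = UDP and the byte-swapped
	 * UDP length) are written just past the data so that the checksum
	 * calculation below can cover them. */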
1710 temp = 0x1100;
1711 *((unsigned short *)
1712 (fr_udp_pkt->data+mbox_len+even_bound)) = temp;
1713 temp = (udp_length<<8)|(udp_length>>8);
1714 *((unsigned short *)
1715 (fr_udp_pkt->data+mbox_len+even_bound+2)) = temp;
1716
1717 /* calculate UDP checksum */
1718 fr_udp_pkt->udp_pkt.udp_checksum = 0;
1719
1720 fr_udp_pkt->udp_pkt.udp_checksum =
1721 calc_checksum(&data[UDP_OFFSET/*+sizeof(fr_encap_hdr_t)*/],
1722 udp_length+UDP_OFFSET);
1723
1724 /* fill in IP length */
1725 ip_length = udp_length + sizeof(ip_pkt_t);
1726 temp = (ip_length<<8)|(ip_length>>8);
1727 fr_udp_pkt->ip_pkt.total_length = temp;
1728
1729 /* swap IP addresses */
1730 ip_temp = fr_udp_pkt->ip_pkt.ip_src_address;
1731 fr_udp_pkt->ip_pkt.ip_src_address =
1732 fr_udp_pkt->ip_pkt.ip_dst_address;
1733 fr_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
1734
1735
1736 /* fill in IP checksum */
1737 fr_udp_pkt->ip_pkt.hdr_checksum = 0;
1738 fr_udp_pkt->ip_pkt.hdr_checksum =
1739 calc_checksum(&data[/*sizeof(fr_encap_hdr_t)*/0],
1740 sizeof(ip_pkt_t));
1741
1742 return len;
1743} /* reply_udp */
1744
1745unsigned short calc_checksum (char *data, int len)
1746{
1747 unsigned short temp;
1748 unsigned long sum=0;
1749 int i;
1750
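	/* One's complement Internet checksum: sum the data as 16-bit words,
	 * fold the carries back in, and complement the result. A computed
	 * value of zero is returned as 0xffff, per UDP convention. */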
1751 for( i = 0; i <len; i+=2 ) {
1752 memcpy(&temp,&data[i],2);
1753 sum += (unsigned long)temp;
1754 }
1755
1756 while (sum >> 16 ) {
1757 sum = (sum & 0xffffUL) + (sum >> 16);
1758 }
1759
1760 temp = (unsigned short)sum;
1761 temp = ~temp;
1762
1763 if( temp == 0 )
1764 temp = 0xffff;
1765
1766 return temp;
1767}
1768
1769/*
1770	If incoming is 0 (outgoing): if the net number is ours, make it 0.
1771	If incoming is 1: if the net number is 0, make it ours.
1772
1773*/
1774static void switch_net_numbers(unsigned char *sendpacket, unsigned long network_number, unsigned char incoming)
1775{
1776 unsigned long pnetwork_number;
1777
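	/* Bytes 14-17 of the packet hold the IPX destination network number;
	 * bytes 26-29 hold the source network number (see the checks below). */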
1778 pnetwork_number = (unsigned long)((sendpacket[14] << 24) +
1779 (sendpacket[15] << 16) + (sendpacket[16] << 8) +
1780 sendpacket[17]);
1781
1782 if (!incoming) {
1783 /* If the destination network number is ours, make it 0 */
1784 if( pnetwork_number == network_number) {
1785 sendpacket[14] = sendpacket[15] = sendpacket[16] =
1786 sendpacket[17] = 0x00;
1787 }
1788 } else {
1789 /* If the incoming network is 0, make it ours */
1790 if( pnetwork_number == 0) {
1791 sendpacket[14] = (unsigned char)(network_number >> 24);
1792 sendpacket[15] = (unsigned char)((network_number &
1793 0x00FF0000) >> 16);
1794 sendpacket[16] = (unsigned char)((network_number &
1795 0x0000FF00) >> 8);
1796 sendpacket[17] = (unsigned char)(network_number &
1797 0x000000FF);
1798 }
1799 }
1800
1801
1802 pnetwork_number = (unsigned long)((sendpacket[26] << 24) +
1803 (sendpacket[27] << 16) + (sendpacket[28] << 8) +
1804 sendpacket[29]);
1805
1806 if( !incoming ) {
1807 /* If the source network is ours, make it 0 */
1808 if( pnetwork_number == network_number) {
1809 sendpacket[26] = sendpacket[27] = sendpacket[28] =
1810 sendpacket[29] = 0x00;
1811 }
1812 } else {
1813 /* If the source network is 0, make it ours */
1814 if( pnetwork_number == 0 ) {
1815 sendpacket[26] = (unsigned char)(network_number >> 24);
1816 sendpacket[27] = (unsigned char)((network_number &
1817 0x00FF0000) >> 16);
1818 sendpacket[28] = (unsigned char)((network_number &
1819 0x0000FF00) >> 8);
1820 sendpacket[29] = (unsigned char)(network_number &
1821 0x000000FF);
1822 }
1823 }
1824} /* switch_net_numbers */
1825
1826/*============================================================================
1827 * Get ethernet-style interface statistics.
1828 * Return a pointer to struct net_device_stats.
1829 */
1830static struct net_device_stats *if_stats(struct net_device *dev)
1831{
1832 fr_channel_t* chan = dev->priv;
1833
1834 if(chan == NULL)
1835 return NULL;
1836
1837 return &chan->ifstats;
1838}
1839
1840/****** Interrupt Handlers **************************************************/
1841
1842/*============================================================================
1843 * fr_isr: S508 frame relay interrupt service routine.
1844 *
1845 * Description:
1846 *	Frame relay main interrupt service routine. This
1847 *	function checks the interrupt type and takes
1848 * the appropriate action.
1849 */
1850static void fr_isr (sdla_t* card)
1851{
1852 fr508_flags_t* flags = card->flags;
1853 char *ptr = &flags->iflag;
1854 int i,err;
1855 fr_mbox_t* mbox = card->mbox;
1856
1857 /* This flag prevents nesting of interrupts. See sdla_isr() routine
1858 * in sdlamain.c. */
1859 card->in_isr = 1;
1860
1861 ++card->statistics.isr_entry;
1862
1863
1864	/* All peripheral (configuration, re-configuration) events
1865	 * take precedence over the ISR. Thus, retrigger */
1866 if (test_bit(PERI_CRIT, (void*)&card->wandev.critical)) {
1867 ++card->statistics.isr_already_critical;
1868 goto fr_isr_exit;
1869 }
1870
1871 if(card->hw.type != SDLA_S514) {
1872 if (test_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
1873 printk(KERN_INFO "%s: Critical while in ISR: If Send Running!\n",
1874 card->devname);
1875 ++card->statistics.isr_already_critical;
1876 goto fr_isr_exit;
1877 }
1878 }
1879
1880 switch (flags->iflag) {
1881
1882 case FR_INTR_RXRDY: /* receive interrupt */
1883 ++card->statistics.isr_rx;
1884 rx_intr(card);
1885 break;
1886
1887
1888 case FR_INTR_TXRDY: /* transmit interrupt */
1889 ++ card->statistics.isr_tx;
1890 tx_intr(card);
1891 break;
1892
1893 case FR_INTR_READY:
1894 Intr_test_counter++;
1895 ++card->statistics.isr_intr_test;
1896 break;
1897
1898 case FR_INTR_DLC: /* Event interrupt occurred */
1899 mbox->cmd.command = FR_READ_STATUS;
1900 mbox->cmd.length = 0;
1901 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
1902 if (err)
1903 fr_event(card, err, mbox);
1904 break;
1905
1906 case FR_INTR_TIMER: /* Timer interrupt */
1907 timer_intr(card);
1908 break;
1909
1910 default:
1911 ++card->statistics.isr_spurious;
1912 spur_intr(card);
1913 printk(KERN_INFO "%s: Interrupt Type 0x%02X!\n",
1914 card->devname, flags->iflag);
1915
1916 printk(KERN_INFO "%s: ID Bytes = ",card->devname);
1917 for(i = 0; i < 8; i ++)
1918 printk(KERN_INFO "0x%02X ", *(ptr + 0x28 + i));
1919 printk(KERN_INFO "\n");
1920
1921 break;
1922 }
1923
1924fr_isr_exit:
1925
1926 card->in_isr = 0;
1927 flags->iflag = 0;
1928 return;
1929}
1930
1931
1932
1933/*===========================================================
1934 * rx_intr Receive interrupt handler.
1935 *
1936 * Description
1937 *	Upon receiving an interrupt:
1938 * 1. Check that the firmware is in sync with
1939 * the driver.
1940 * 2. Find an appropriate network interface
1941 * based on the received dlci number.
1942 *	3. Check that the network interface exists
1943 *	   and that it is set up properly.
1944 * 4. Copy the data into an skb buffer.
1945 * 5. Check the packet type and take
1946 *	   appropriate action: UDP, API, ARP or Data.
1947 */
1948
1949static void rx_intr (sdla_t* card)
1950{
1951 fr_rx_buf_ctl_t* frbuf = card->rxmb;
1952 fr508_flags_t* flags = card->flags;
1953 fr_channel_t* chan;
1954 char *ptr = &flags->iflag;
1955 struct sk_buff* skb;
1956 struct net_device* dev;
1957 void* buf;
1958 unsigned dlci, len, offs, len_incl_hdr;
1959 int i, udp_type;
1960
1961
1962 /* Check that firmware buffers are in sync */
1963 if (frbuf->flag != 0x01) {
1964
1965 printk(KERN_INFO
1966 "%s: corrupted Rx buffer @ 0x%X, flag = 0x%02X!\n",
1967 card->devname, (unsigned)frbuf, frbuf->flag);
1968
1969 printk(KERN_INFO "%s: ID Bytes = ",card->devname);
1970 for(i = 0; i < 8; i ++)
1971 printk(KERN_INFO "0x%02X ", *(ptr + 0x28 + i));
1972 printk(KERN_INFO "\n");
1973
1974 ++card->statistics.rx_intr_corrupt_rx_bfr;
1975
1976 /* Bug Fix: Mar 6 2000
1977 * If we get a corrupted mailbox, it means that driver
1978 * is out of sync with the firmware. There is no recovery.
1979 * If we don't turn off all interrupts for this card
1980 * the machine will crash.
1981 */
1982 printk(KERN_INFO "%s: Critical router failure ...!!!\n", card->devname);
1983 printk(KERN_INFO "Please contact Sangoma Technologies !\n");
1984 fr_set_intr_mode(card, 0, 0, 0);
1985 return;
1986 }
1987
1988 len = frbuf->length;
1989 dlci = frbuf->dlci;
1990 offs = frbuf->offset;
1991
1992 /* Find the network interface for this packet */
1993 dev = find_channel(card, dlci);
1994
1995
1996 /* Check that the network interface is active and
1997 * properly setup */
1998 if (dev == NULL) {
1999 if( net_ratelimit()) {
2000 printk(KERN_INFO "%s: received data on unconfigured DLCI %d!\n",
2001 card->devname, dlci);
2002 }
2003 ++card->statistics.rx_intr_on_orphaned_DLCI;
2004 ++card->wandev.stats.rx_dropped;
2005 goto rx_done;
2006 }
2007
2008 if ((chan = dev->priv) == NULL){
2009 if( net_ratelimit()) {
2010 printk(KERN_INFO "%s: received data on unconfigured DLCI %d!\n",
2011 card->devname, dlci);
2012 }
2013 ++card->statistics.rx_intr_on_orphaned_DLCI;
2014 ++card->wandev.stats.rx_dropped;
2015 goto rx_done;
2016 }
2017
2018 skb = dev_alloc_skb(len);
2019
2020 if (!netif_running(dev) || (skb == NULL)){
2021
2022 ++chan->ifstats.rx_dropped;
2023
2024 if(skb == NULL) {
2025 if (net_ratelimit()) {
2026 printk(KERN_INFO
2027 "%s: no socket buffers available!\n",
2028 card->devname);
2029 }
2030 chan->drvstats_rx_intr.rx_intr_no_socket ++;
2031 }
2032
2033 if (!netif_running(dev)){
2034 chan->drvstats_rx_intr.
2035 rx_intr_dev_not_started ++;
2036 if (skb){
2037 dev_kfree_skb_any(skb);
2038 }
2039 }
2040 goto rx_done;
2041 }
2042
2043 /* Copy data from the board into the socket buffer */
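	/* The on-board receive buffer is circular: if this frame wraps past
	 * rx_top, copy the tail piece first, then continue from rx_base. */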
2044 if ((offs + len) > card->u.f.rx_top + 1) {
2045 unsigned tmp = card->u.f.rx_top - offs + 1;
2046
2047 buf = skb_put(skb, tmp);
2048 sdla_peek(&card->hw, offs, buf, tmp);
2049 offs = card->u.f.rx_base;
2050 len -= tmp;
2051 }
2052
2053 buf = skb_put(skb, len);
2054 sdla_peek(&card->hw, offs, buf, len);
2055
2056
2057	/* We got the packet from the board.
2058 * Check the packet type and take appropriate action */
2059
2060 udp_type = udp_pkt_type( skb, card );
2061
2062 if(udp_type != UDP_INVALID_TYPE) {
2063
2064 /* UDP Debug packet received, store the
2065 * packet and handle it in timer interrupt */
2066
2067 skb_pull(skb, 1);
2068 if (wanrouter_type_trans(skb, dev)){
2069 if(store_udp_mgmt_pkt(udp_type,UDP_PKT_FRM_NETWORK,card,skb,dlci)){
2070
2071 flags->imask |= FR_INTR_TIMER;
2072
2073 if (udp_type == UDP_FPIPE_TYPE){
2074 ++chan->drvstats_rx_intr.rx_intr_PIPE_request;
2075 }
2076 }
2077 }
2078
2079 }else if (chan->common.usedby == API) {
2080
2081 /* We are in API mode.
2082 * Add an API header to the RAW packet
2083 * and queue it into a circular buffer.
2084 * Then kick the fr_bh() bottom half handler */
2085
2086 api_rx_hdr_t* api_rx_hdr;
2087 chan->drvstats_rx_intr.rx_intr_bfr_passed_to_stack ++;
2088 chan->ifstats.rx_packets ++;
2089 card->wandev.stats.rx_packets ++;
2090
2091 chan->ifstats.rx_bytes += skb->len;
2092 card->wandev.stats.rx_bytes += skb->len;
2093
2094 skb_push(skb, sizeof(api_rx_hdr_t));
2095 api_rx_hdr = (api_rx_hdr_t*)&skb->data[0x00];
2096 api_rx_hdr->attr = frbuf->attr;
2097 api_rx_hdr->time_stamp = frbuf->tmstamp;
2098
2099 skb->protocol = htons(ETH_P_IP);
2100 skb->mac.raw = skb->data;
2101 skb->dev = dev;
2102 skb->pkt_type = WAN_PACKET_DATA;
2103
2104 bh_enqueue(dev, skb);
2105
2106 trigger_fr_bh(chan);
2107
2108 }else if (handle_IPXWAN(skb->data,chan->name,chan->enable_IPX, chan->network_number)){
2109
2110 //FIXME: Frame Relay IPX is not supported, Yet !
2111 //if (chan->enable_IPX) {
2112 // fr_send(card, dlci, 0, skb->len,skb->data);
2113 //}
2114 dev_kfree_skb_any(skb);
2115
2116 } else if (is_arp(skb->data)) {
2117
2118 /* ARP support enabled Mar 16 2000
2119 * Process incoming ARP reply/request, setup
2120 * dynamic routes. */
2121
2122 if (process_ARP((arphdr_1490_t *)skb->data, card, dev)) {
2123 if (net_ratelimit()){
2124 printk (KERN_INFO
2125 "%s: Error processing ARP Packet.\n",
2126 card->devname);
2127 }
2128 }
2129 dev_kfree_skb_any(skb);
2130
2131 } else if (skb->data[0] != 0x03) {
2132
2133 if (net_ratelimit()) {
2134 printk(KERN_INFO "%s: Non IETF packet discarded.\n",
2135 card->devname);
2136 }
2137 dev_kfree_skb_any(skb);
2138
2139 } else {
2140
2141 len_incl_hdr = skb->len;
2142 /* Decapsulate packet and pass it up the
2143 protocol stack */
2144 skb->dev = dev;
2145
2146 if (chan->common.usedby == BRIDGE || chan->common.usedby == BRIDGE_NODE){
2147
2148 /* Make sure it's an Ethernet frame, otherwise drop it */
2149 if (!memcmp(skb->data, "\x03\x00\x80\x00\x80\xC2\x00\x07", 8)) {
2150 skb_pull(skb, 8);
2151 skb->protocol=eth_type_trans(skb,dev);
2152 }else{
2153 ++chan->drvstats_rx_intr.rx_intr_bfr_not_passed_to_stack;
2154 ++chan->ifstats.rx_errors;
2155 ++card->wandev.stats.rx_errors;
2156 goto rx_done;
2157 }
2158 }else{
2159
2160 /* remove hardware header */
2161 buf = skb_pull(skb, 1);
2162
2163 if (!wanrouter_type_trans(skb, dev)) {
2164
2165 /* can't decapsulate packet */
2166 dev_kfree_skb_any(skb);
2167
2168 ++chan->drvstats_rx_intr.rx_intr_bfr_not_passed_to_stack;
2169 ++chan->ifstats.rx_errors;
2170 ++card->wandev.stats.rx_errors;
2171 goto rx_done;
2172 }
2173 skb->mac.raw = skb->data;
2174 }
2175
2176
2177 /* Send a packet up the IP stack */
2178 skb->dev->last_rx = jiffies;
2179 netif_rx(skb);
2180 ++chan->drvstats_rx_intr.rx_intr_bfr_passed_to_stack;
2181 ++chan->ifstats.rx_packets;
2182 ++card->wandev.stats.rx_packets;
2183
2184 chan->ifstats.rx_bytes += len_incl_hdr;
2185 card->wandev.stats.rx_bytes += len_incl_hdr;
2186 }
2187
2188rx_done:
2189
2190 /* Release buffer element and calculate a pointer to the next one */
2191 frbuf->flag = 0;
2192 card->rxmb = ++frbuf;
2193 if ((void*)frbuf > card->u.f.rxmb_last)
2194 card->rxmb = card->u.f.rxmb_base;
2195
2196}
2197
2198/*==================================================================
2199 * tx_intr: Transmit interrupt handler.
2200 *
2201 * Rationale:
2202 *	If the board is busy transmitting, if_send() will
2203 *	buffer a single packet and turn on
2204 *	the tx interrupt. The tx interrupt is raised
2205 * by the board, once the firmware can send more
2206 * data. Thus, no polling is required.
2207 *
2208 * Description:
2209 * Tx interrupt is called for each
2210 * configured dlci channel. Thus:
2211 *	1. Obtain the network interface based on the
2212 * dlci number.
2213 * 2. Check that network interface is up and
2214 * properly setup.
2215 * 3. Check for a buffered packet.
2216 * 4. Transmit the packet.
2217 * 5. If we are in WANPIPE mode, mark the
2218 * NET_BH handler.
2219 * 6. If we are in API mode, kick
2220 * the AF_WANPIPE socket for more data.
2221 *
2222 */
2223static void tx_intr(sdla_t *card)
2224{
2225 fr508_flags_t* flags = card->flags;
2226 fr_tx_buf_ctl_t* bctl;
2227 struct net_device* dev;
2228 fr_channel_t* chan;
2229
2230 if(card->hw.type == SDLA_S514){
2231 bctl = (void*)(flags->tse_offs + card->hw.dpmbase);
2232 }else{
2233 bctl = (void*)(flags->tse_offs - FR_MB_VECTOR +
2234 card->hw.dpmbase);
2235 }
2236
2237 /* Find the structure and make it unbusy */
2238 dev = find_channel(card, flags->dlci);
2239 if (dev == NULL){
2240 printk(KERN_INFO "NO DEV IN TX Interrupt\n");
2241 goto end_of_tx_intr;
2242 }
2243
2244 if ((chan = dev->priv) == NULL){
2245 printk(KERN_INFO "NO CHAN IN TX Interrupt\n");
2246 goto end_of_tx_intr;
2247 }
2248
2249 if(!chan->transmit_length || !chan->delay_skb) {
2250 printk(KERN_INFO "%s: tx int error - transmit length zero\n",
2251 card->wandev.name);
2252 goto end_of_tx_intr;
2253 }
2254
2255 /* If the 'if_send()' procedure is currently checking the 'tbusy'
2256 status, then we cannot transmit. Instead, we configure the microcode
2257 so as to re-issue this transmit interrupt at a later stage.
2258 */
2259 if (test_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical)) {
2260
2261 fr_dlci_interface_t* dlci_interface = chan->dlci_int_interface;
2262 bctl->flag = 0xA0;
2263 dlci_interface->gen_interrupt |= FR_INTR_TXRDY;
2264 return;
2265
2266 }else{
2267 bctl->dlci = flags->dlci;
2268 bctl->length = chan->transmit_length+chan->fr_header_len;
2269 sdla_poke(&card->hw,
2270 fr_send_hdr(card,bctl->dlci,bctl->offset),
2271 chan->delay_skb->data,
2272 chan->delay_skb->len);
2273 bctl->flag = 0xC0;
2274
2275 ++chan->ifstats.tx_packets;
2276 ++card->wandev.stats.tx_packets;
2277 chan->ifstats.tx_bytes += chan->transmit_length;
2278 card->wandev.stats.tx_bytes += chan->transmit_length;
2279
2280 /* We must free an sk buffer, which we used
2281		 * for delayed transmission; otherwise, the socket
2282 * will run out of memory */
2283 dev_kfree_skb_any(chan->delay_skb);
2284
2285 chan->delay_skb = NULL;
2286 chan->transmit_length = 0;
2287
2288 dev->trans_start = jiffies;
2289
2290 if (netif_queue_stopped(dev)){
2291			/* If using API, then wake up the socket BH handler */
2292 if (chan->common.usedby == API){
2293 netif_start_queue(dev);
2294 wakeup_sk_bh(dev);
2295 }else{
2296 netif_wake_queue(dev);
2297 }
2298 }
2299 }
2300
2301end_of_tx_intr:
2302
2303 /* if any other interfaces have transmit interrupts pending,
2304 * do not disable the global transmit interrupt */
2305 if(!(-- card->u.f.tx_interrupts_pending))
2306 flags->imask &= ~FR_INTR_TXRDY;
2307
2308
2309}
2310
2311
2312/*============================================================================
2313 * timer_intr: Timer interrupt handler.
2314 *
2315 * Rationale:
2316 *	All commands must be executed within the timer
2317 * interrupt since no two commands should execute
2318 * at the same time.
2319 *
2320 * Description:
2321 * The timer interrupt is used to:
2322 *	1. Process UDP calls from 'fpipemon'.
2323 *	2. Process update calls from the /proc file system.
2324 *	3. Read board-level statistics for
2325 *	   updating the /proc file system.
2326 *	4. Send inverse ARP request packets.
2327 *	5. Configure a dlci/channel.
2328 *	6. Unconfigure a dlci/channel. (Node only)
2329 */
2330
2331static void timer_intr(sdla_t *card)
2332{
2333 fr508_flags_t* flags = card->flags;
2334
2335	/* UDP Debugging: fpipemon call */
2336 if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_UDP) {
2337 if(card->u.f.udp_type == UDP_FPIPE_TYPE) {
2338 if(process_udp_mgmt_pkt(card)) {
2339 card->u.f.timer_int_enabled &=
2340 ~TMR_INT_ENABLED_UDP;
2341 }
2342 }
2343 }
2344
2345 /* /proc update call : triggered from update() */
2346 if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_UPDATE) {
2347 fr_get_err_stats(card);
2348 fr_get_stats(card);
2349 card->u.f.update_comms_stats = 0;
2350 card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE;
2351 }
2352
2353	/* Update the channel state call. This call is
2354	 * triggered by the if_send() function */
2355 if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_UPDATE_STATE){
2356 struct net_device *dev;
2357 if (card->wandev.state == WAN_CONNECTED){
2358 for (dev = card->wandev.dev; dev;
2359 dev = *((struct net_device **)dev->priv)){
2360 fr_channel_t *chan = dev->priv;
2361 if (chan->common.state != WAN_CONNECTED){
2362 update_chan_state(dev);
2363 }
2364 }
2365 }
2366 card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE_STATE;
2367 }
2368
2369 /* configure a dlci/channel */
2370 if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_CONFIG){
2371 config_fr(card);
2372 card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_CONFIG;
2373 }
2374
2375 /* unconfigure a dlci/channel */
2376 if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_UNCONFIG){
2377 unconfig_fr(card);
2378 card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UNCONFIG;
2379 }
2380
2381
2382 /* Transmit ARP packets */
2383 if (card->u.f.timer_int_enabled & TMR_INT_ENABLED_ARP){
2384 int i=0;
2385 struct net_device *dev;
2386
2387 if (card->u.f.arp_dev == NULL)
2388 card->u.f.arp_dev = card->wandev.dev;
2389
2390 dev = card->u.f.arp_dev;
2391
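		/* Walk the interfaces round-robin starting at arp_dev, sending at
		 * most one In-ARP request per timer interrupt. Remember where we
		 * stopped, and clear the ARP timer flag after a full pass. */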
2392 for (;;){
2393
2394 fr_channel_t *chan = dev->priv;
2395
2396			/* If the interface is brought down, cancel sending In-ARPs */
2397 if (!(dev->flags&IFF_UP)){
2398 clear_bit(0,&chan->inarp_ready);
2399 }
2400
2401 if (test_bit(0,&chan->inarp_ready)){
2402
2403 if (check_tx_status(card,dev)){
2404 set_bit(ARP_CRIT,&card->wandev.critical);
2405 break;
2406 }
2407
2408 if (!send_inarp_request(card,dev)){
2409 trigger_fr_arp(dev);
2410 chan->inarp_tick = jiffies;
2411 }
2412
2413 clear_bit(0,&chan->inarp_ready);
2414 dev = move_dev_to_next(card,dev);
2415 break;
2416 }
2417 dev = move_dev_to_next(card,dev);
2418
2419 if (++i == card->wandev.new_if_cnt){
2420 card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_ARP;
2421 break;
2422 }
2423 }
2424 card->u.f.arp_dev = dev;
2425 }
2426
2427 if(!card->u.f.timer_int_enabled)
2428 flags->imask &= ~FR_INTR_TIMER;
2429}
2430
2431
2432/*============================================================================
2433 * spur_intr: Spurious interrupt handler.
2434 *
2435 * Description:
2436 * We don't know this interrupt.
2437 * Print a warning.
2438 */
2439
2440static void spur_intr (sdla_t* card)
2441{
2442 if (net_ratelimit()){
2443 printk(KERN_INFO "%s: spurious interrupt!\n", card->devname);
2444 }
2445}
2446
2447
2448//FIXME: Fix the IPX in next version
2449/*===========================================================================
2450 * Return 0 for non-IPXWAN packet
2451 * 1 for IPXWAN packet or IPX is not enabled!
2452 * FIXME: Use a IPX structure here not offsets
2453 */
2454static int handle_IPXWAN(unsigned char *sendpacket,
2455 char *devname, unsigned char enable_IPX,
2456 unsigned long network_number)
2457{
2458 int i;
2459
2460 if( sendpacket[1] == 0x00 && sendpacket[2] == 0x80 &&
2461 sendpacket[6] == 0x81 && sendpacket[7] == 0x37) {
2462
2463 /* It's an IPX packet */
2464 if (!enable_IPX){
2465 /* Return 1 so we don't pass it up the stack. */
2466 //FIXME: Take this out when IPX is fixed
2467 if (net_ratelimit()){
2468 printk (KERN_INFO
2469 "%s: WARNING: Unsupported IPX packet received and dropped\n",
2470 devname);
2471 }
2472 return 1;
2473 }
2474 } else {
2475 /* It's not IPX so return and pass it up the stack. */
2476 return 0;
2477 }
2478
2479 if( sendpacket[24] == 0x90 && sendpacket[25] == 0x04){
2480 /* It's IPXWAN */
2481
2482 if( sendpacket[10] == 0x02 && sendpacket[42] == 0x00){
2483
2484 /* It's a timer request packet */
2485 printk(KERN_INFO "%s: Received IPXWAN Timer Request packet\n",
2486 devname);
2487
2488 /* Go through the routing options and answer no to every
2489 * option except Unnumbered RIP/SAP
2490 */
2491 for(i = 49; sendpacket[i] == 0x00; i += 5){
2492 /* 0x02 is the option for Unnumbered RIP/SAP */
2493 if( sendpacket[i + 4] != 0x02){
2494 sendpacket[i + 1] = 0;
2495 }
2496 }
2497
2498 /* Skip over the extended Node ID option */
2499 if( sendpacket[i] == 0x04 ){
2500 i += 8;
2501 }
2502
2503 /* We also want to turn off all header compression opt.
2504 */
2505 for(; sendpacket[i] == 0x80 ;){
2506 sendpacket[i + 1] = 0;
2507 i += (sendpacket[i + 2] << 8) + (sendpacket[i + 3]) + 4;
2508 }
2509
2510 /* Set the packet type to timer response */
2511 sendpacket[42] = 0x01;
2512
2513 printk(KERN_INFO "%s: Sending IPXWAN Timer Response\n",
2514 devname);
2515
2516 } else if( sendpacket[42] == 0x02 ){
2517
2518 /* This is an information request packet */
2519 printk(KERN_INFO
2520 "%s: Received IPXWAN Information Request packet\n",
2521 devname);
2522
2523 /* Set the packet type to information response */
2524 sendpacket[42] = 0x03;
2525
2526 /* Set the router name */
2527 sendpacket[59] = 'F';
2528 sendpacket[60] = 'P';
2529 sendpacket[61] = 'I';
2530 sendpacket[62] = 'P';
2531 sendpacket[63] = 'E';
2532 sendpacket[64] = '-';
2533 sendpacket[65] = CVHexToAscii(network_number >> 28);
2534 sendpacket[66] = CVHexToAscii((network_number & 0x0F000000)>> 24);
2535 sendpacket[67] = CVHexToAscii((network_number & 0x00F00000)>> 20);
2536 sendpacket[68] = CVHexToAscii((network_number & 0x000F0000)>> 16);
2537 sendpacket[69] = CVHexToAscii((network_number & 0x0000F000)>> 12);
2538 sendpacket[70] = CVHexToAscii((network_number & 0x00000F00)>> 8);
2539 sendpacket[71] = CVHexToAscii((network_number & 0x000000F0)>> 4);
2540 sendpacket[72] = CVHexToAscii(network_number & 0x0000000F);
2541 for(i = 73; i < 107; i+= 1)
2542 {
2543 sendpacket[i] = 0;
2544 }
2545
2546 printk(KERN_INFO "%s: Sending IPXWAN Information Response packet\n",
2547 devname);
2548 } else {
2549
2550 printk(KERN_INFO "%s: Unknown IPXWAN packet!\n",devname);
2551 return 0;
2552 }
2553
2554 /* Set the WNodeID to our network address */
2555 sendpacket[43] = (unsigned char)(network_number >> 24);
2556 sendpacket[44] = (unsigned char)((network_number & 0x00FF0000) >> 16);
2557 sendpacket[45] = (unsigned char)((network_number & 0x0000FF00) >> 8);
2558 sendpacket[46] = (unsigned char)(network_number & 0x000000FF);
2559
2560 return 1;
2561 }
2562
2563 /* If we get here, it's an IPX-data packet so it'll get passed up the
2564 * stack.
2565 * switch the network numbers
2566 */
2567 switch_net_numbers(sendpacket, network_number ,1);
2568 return 0;
2569}
2570/*============================================================================
2571 * process_route
2572 *
2573 * Rationale:
2574 * If the interface goes down, or we receive an ARP request,
2575 * we have to change the network interface ip addresses.
2576 * This cannot be done within the interrupt.
2577 *
2578 * Description:
2579 *
2580 * This routine is called as a polling routine to dynamically
2581 * add/delete routes negotiated by inverse ARP. It is in this
2582 * "task" because we don't want routes to be added while in
2583 * interrupt context.
2584 *
2585 * Usage:
2586 * 	This function is called by the fr_poll() polling function.
2587 */
2588
2589static void process_route(struct net_device *dev)
2590{
2591 fr_channel_t *chan = dev->priv;
2592 sdla_t *card = chan->card;
2593
2594 struct ifreq if_info;
2595 struct sockaddr_in *if_data;
2596 mm_segment_t fs = get_fs();
2597 u32 ip_tmp;
2598 int err;
2599
2600
2601 switch(chan->route_flag){
2602
2603 case ADD_ROUTE:
2604
2605 /* Set remote addresses */
2606 memset(&if_info, 0, sizeof(if_info));
2607 strcpy(if_info.ifr_name, dev->name);
2608
2609 set_fs(get_ds()); /* get user space block */
2610
2611 if_data = (struct sockaddr_in *)&if_info.ifr_dstaddr;
2612 if_data->sin_addr.s_addr = chan->ip_remote;
2613 if_data->sin_family = AF_INET;
2614 err = devinet_ioctl( SIOCSIFDSTADDR, &if_info );
2615
2616 set_fs(fs); /* restore old block */
2617
2618 if (err) {
2619 printk(KERN_INFO
2620 "%s: Route Add failed. Error: %d\n",
2621 card->devname,err);
2622 printk(KERN_INFO "%s: Address: %u.%u.%u.%u\n",
2623 chan->name, NIPQUAD(chan->ip_remote));
2624
2625 }else {
2626 printk(KERN_INFO "%s: Route Added Successfully: %u.%u.%u.%u\n",
2627 card->devname,NIPQUAD(chan->ip_remote));
2628 chan->route_flag = ROUTE_ADDED;
2629 }
2630 break;
2631
2632 case REMOVE_ROUTE:
2633
2634 /* Set remote addresses */
2635 memset(&if_info, 0, sizeof(if_info));
2636 strcpy(if_info.ifr_name, dev->name);
2637
2638 ip_tmp = get_ip_address(dev,WAN_POINTOPOINT_IP);
2639
2640 set_fs(get_ds()); /* get user space block */
2641
2642 if_data = (struct sockaddr_in *)&if_info.ifr_dstaddr;
2643 if_data->sin_addr.s_addr = 0;
2644 if_data->sin_family = AF_INET;
2645 err = devinet_ioctl( SIOCSIFDSTADDR, &if_info );
2646
2647 set_fs(fs);
2648
2649 if (err) {
2650 printk(KERN_INFO
2651 "%s: Deleting of route failed. Error: %d\n",
2652 card->devname,err);
2653 printk(KERN_INFO "%s: Address: %u.%u.%u.%u\n",
2654 dev->name,NIPQUAD(chan->ip_remote) );
2655
2656 } else {
2657			printk(KERN_INFO "%s: Route Removed Successfully: %u.%u.%u.%u\n",
2658 card->devname,NIPQUAD(ip_tmp));
2659 chan->route_flag = NO_ROUTE;
2660 }
2661 break;
2662
2663 } /* Case Statement */
2664
2665}
2666
2667
2668
2669/****** Frame Relay Firmware-Specific Functions *****************************/
2670
2671/*============================================================================
2672 * Read firmware code version.
2673 * o fill string str with firmware version info.
2674 */
2675static int fr_read_version (sdla_t* card, char* str)
2676{
2677 fr_mbox_t* mbox = card->mbox;
2678 int retry = MAX_CMD_RETRY;
2679 int err;
2680
2681 do
2682 {
2683 mbox->cmd.command = FR_READ_CODE_VERSION;
2684 mbox->cmd.length = 0;
2685 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2686 } while (err && retry-- && fr_event(card, err, mbox));
2687
2688 if (!err && str) {
2689 int len = mbox->cmd.length;
2690 memcpy(str, mbox->data, len);
2691 str[len] = '\0';
2692 }
2693 return err;
2694}
2695
2696/*============================================================================
2697 * Set global configuration.
2698 */
2699static int fr_configure (sdla_t* card, fr_conf_t *conf)
2700{
2701 fr_mbox_t* mbox = card->mbox;
2702 int retry = MAX_CMD_RETRY;
2703 int dlci_num = card->u.f.dlci_num;
2704 int err, i;
2705
2706 do
2707 {
2708 memcpy(mbox->data, conf, sizeof(fr_conf_t));
2709
2710 if (dlci_num) for (i = 0; i < dlci_num; ++i)
2711 ((fr_conf_t*)mbox->data)->dlci[i] =
2712 card->u.f.node_dlci[i];
2713
2714 mbox->cmd.command = FR_SET_CONFIG;
2715 mbox->cmd.length =
2716 sizeof(fr_conf_t) + dlci_num * sizeof(short);
2717
2718 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2719
2720 } while (err && retry-- && fr_event(card, err, mbox));
2721
2722 /*NC Oct 12 2000 */
2723 if (err != CMD_OK){
2724 printk(KERN_ERR "%s: Frame Relay Configuration Failed: rc=0x%x\n",
2725 card->devname,err);
2726 }
2727
2728 return err;
2729}
2730
2731/*============================================================================
2732 * Set DLCI configuration.
2733 */
2734static int fr_dlci_configure (sdla_t* card, fr_dlc_conf_t *conf, unsigned dlci)
2735{
2736 fr_mbox_t* mbox = card->mbox;
2737 int retry = MAX_CMD_RETRY;
2738 int err;
2739
2740 do
2741 {
2742 memcpy(mbox->data, conf, sizeof(fr_dlc_conf_t));
2743 mbox->cmd.dlci = (unsigned short) dlci;
2744 mbox->cmd.command = FR_SET_CONFIG;
2745 mbox->cmd.length = sizeof(fr_dlc_conf_t);
2746 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2747 } while (err && retry--);
2748
2749 return err;
2750}
2751/*============================================================================
2752 * Set interrupt mode.
2753 */
2754static int fr_set_intr_mode (sdla_t* card, unsigned mode, unsigned mtu,
2755 unsigned short timeout)
2756{
2757 fr_mbox_t* mbox = card->mbox;
2758 fr508_intr_ctl_t* ictl = (void*)mbox->data;
2759 int retry = MAX_CMD_RETRY;
2760 int err;
2761
2762 do
2763 {
2764 memset(ictl, 0, sizeof(fr508_intr_ctl_t));
2765 ictl->mode = mode;
2766 ictl->tx_len = mtu;
2767 ictl->irq = card->hw.irq;
2768
2769 /* indicate timeout on timer */
2770 if (mode & 0x20) ictl->timeout = timeout;
2771
2772 mbox->cmd.length = sizeof(fr508_intr_ctl_t);
2773 mbox->cmd.command = FR_SET_INTR_MODE;
2774 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2775
2776 } while (err && retry-- && fr_event(card, err, mbox));
2777
2778 return err;
2779}
2780
2781/*============================================================================
2782 * Enable communications.
2783 */
2784static int fr_comm_enable (sdla_t* card)
2785{
2786 fr_mbox_t* mbox = card->mbox;
2787 int retry = MAX_CMD_RETRY;
2788 int err;
2789
2790 do
2791 {
2792 mbox->cmd.command = FR_COMM_ENABLE;
2793 mbox->cmd.length = 0;
2794 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2795 } while (err && retry-- && fr_event(card, err, mbox));
2796
2797 return err;
2798}
2799
2800/*============================================================================
2801 * fr_comm_disable
2802 *
2803 * Warning: This function is called by the shutdown() procedure. It is void
2804 *          since dev->priv has already been deallocated and no
2805 *          error checking is possible using the fr_event() function.
2806 */
2807static void fr_comm_disable (sdla_t* card)
2808{
2809 fr_mbox_t* mbox = card->mbox;
2810 int retry = MAX_CMD_RETRY;
2811 int err;
2812
2813 do {
2814 mbox->cmd.command = FR_SET_MODEM_STATUS;
2815 mbox->cmd.length = 1;
2816 mbox->data[0] = 0;
2817 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2818 } while (err && retry--);
2819
2820 retry = MAX_CMD_RETRY;
2821
2822 do
2823 {
2824 mbox->cmd.command = FR_COMM_DISABLE;
2825 mbox->cmd.length = 0;
2826 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2827 } while (err && retry--);
2828
2829 return;
2830}
2831
2832
2833
2834/*============================================================================
2835 * Get communications error statistics.
2836 */
2837static int fr_get_err_stats (sdla_t* card)
2838{
2839 fr_mbox_t* mbox = card->mbox;
2840 int retry = MAX_CMD_RETRY;
2841 int err;
2842
2843
2844 do
2845 {
2846 mbox->cmd.command = FR_READ_ERROR_STATS;
2847 mbox->cmd.length = 0;
2848 mbox->cmd.dlci = 0;
2849 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2850 } while (err && retry-- && fr_event(card, err, mbox));
2851
2852 if (!err) {
2853 fr_comm_stat_t* stats = (void*)mbox->data;
2854 card->wandev.stats.rx_over_errors = stats->rx_overruns;
2855 card->wandev.stats.rx_crc_errors = stats->rx_bad_crc;
2856 card->wandev.stats.rx_missed_errors = stats->rx_aborts;
2857 card->wandev.stats.rx_length_errors = stats->rx_too_long;
2858 card->wandev.stats.tx_aborted_errors = stats->tx_aborts;
2859
2860 }
2861
2862 return err;
2863}
2864
2865/*============================================================================
2866 * Get statistics.
2867 */
2868static int fr_get_stats (sdla_t* card)
2869{
2870 fr_mbox_t* mbox = card->mbox;
2871 int retry = MAX_CMD_RETRY;
2872 int err;
2873
2874
2875 do
2876 {
2877 mbox->cmd.command = FR_READ_STATISTICS;
2878 mbox->cmd.length = 0;
2879 mbox->cmd.dlci = 0;
2880 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2881 } while (err && retry-- && fr_event(card, err, mbox));
2882
2883 if (!err) {
2884 fr_link_stat_t* stats = (void*)mbox->data;
2885 card->wandev.stats.rx_frame_errors = stats->rx_bad_format;
2886 card->wandev.stats.rx_dropped =
2887 stats->rx_dropped + stats->rx_dropped2;
2888 }
2889
2890 return err;
2891}
2892
2893/*============================================================================
2894 * Add DLCI(s) (Access Node only!).
2895 * This routine will perform the ADD_DLCIs command for the specified DLCI.
2896 */
2897static int fr_add_dlci (sdla_t* card, int dlci)
2898{
2899 fr_mbox_t* mbox = card->mbox;
2900 int retry = MAX_CMD_RETRY;
2901 int err;
2902
2903 do
2904 {
2905 unsigned short* dlci_list = (void*)mbox->data;
2906
2907 mbox->cmd.length = sizeof(short);
2908 dlci_list[0] = dlci;
2909 mbox->cmd.command = FR_ADD_DLCI;
2910 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2911
2912 } while (err && retry-- && fr_event(card, err, mbox));
2913
2914 return err;
2915}
2916
2917/*============================================================================
2918 * Activate DLCI(s) (Access Node only!).
2919 * This routine will perform the ACTIVATE_DLCIs command with a DLCI number.
2920 */
2921static int fr_activate_dlci (sdla_t* card, int dlci)
2922{
2923 fr_mbox_t* mbox = card->mbox;
2924 int retry = MAX_CMD_RETRY;
2925 int err;
2926
2927 do
2928 {
2929 unsigned short* dlci_list = (void*)mbox->data;
2930
2931 mbox->cmd.length = sizeof(short);
2932 dlci_list[0] = dlci;
2933 mbox->cmd.command = FR_ACTIVATE_DLCI;
2934 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2935
2936 } while (err && retry-- && fr_event(card, err, mbox));
2937
2938 return err;
2939}
2940
2941/*============================================================================
2942 * Delete DLCI(s) (Access Node only!).
2943 * This routine will perform the DELETE_DLCIs command with a DLCI number.
2944 */
2945static int fr_delete_dlci (sdla_t* card, int dlci)
2946{
2947 fr_mbox_t* mbox = card->mbox;
2948 int retry = MAX_CMD_RETRY;
2949 int err;
2950
2951 do
2952 {
2953 unsigned short* dlci_list = (void*)mbox->data;
2954
2955 mbox->cmd.length = sizeof(short);
2956 dlci_list[0] = dlci;
2957 mbox->cmd.command = FR_DELETE_DLCI;
2958 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2959
2960 } while (err && retry-- && fr_event(card, err, mbox));
2961
2962 return err;
2963}
2964
2965
2966
2967/*============================================================================
2968 * Issue in-channel signalling frame.
2969 */
2970static int fr_issue_isf (sdla_t* card, int isf)
2971{
2972 fr_mbox_t* mbox = card->mbox;
2973 int retry = MAX_CMD_RETRY;
2974 int err;
2975
2976 do
2977 {
2978 mbox->data[0] = isf;
2979 mbox->cmd.length = 1;
2980 mbox->cmd.command = FR_ISSUE_IS_FRAME;
2981 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2982 } while (err && retry-- && fr_event(card, err, mbox));
2983
2984 return err;
2985}
2986
2987
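/*============================================================================
 * Write the cached frame relay header (if any) for this DLCI into the
 * transmit buffer at 'offset' and return the offset just past it.
 */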
2988static unsigned int fr_send_hdr (sdla_t*card, int dlci, unsigned int offset)
2989{
2990 struct net_device *dev = find_channel(card,dlci);
2991 fr_channel_t *chan;
2992
2993 if (!dev || !(chan=dev->priv))
2994 return offset;
2995
2996 if (chan->fr_header_len){
2997 sdla_poke(&card->hw, offset, chan->fr_header, chan->fr_header_len);
2998 }
2999
3000 return offset+chan->fr_header_len;
3001}
3002
3003/*============================================================================
3004 * Send a frame on a selected DLCI.
3005 */
3006static int fr_send_data_header (sdla_t* card, int dlci, unsigned char attr, int len,
3007 void *buf, unsigned char hdr_len)
3008{
3009 fr_mbox_t* mbox = card->mbox + 0x800;
3010 int retry = MAX_CMD_RETRY;
3011 int err;
3012
3013 do
3014 {
3015 mbox->cmd.dlci = dlci;
3016 mbox->cmd.attr = attr;
3017 mbox->cmd.length = len+hdr_len;
3018 mbox->cmd.command = FR_WRITE;
3019 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
3020 } while (err && retry-- && fr_event(card, err, mbox));
3021
3022 if (!err) {
3023 fr_tx_buf_ctl_t* frbuf;
3024
3025 if(card->hw.type == SDLA_S514)
3026 frbuf = (void*)(*(unsigned long*)mbox->data +
3027 card->hw.dpmbase);
3028 else
3029 frbuf = (void*)(*(unsigned long*)mbox->data -
3030 FR_MB_VECTOR + card->hw.dpmbase);
3031
3032 sdla_poke(&card->hw, fr_send_hdr(card,dlci,frbuf->offset), buf, len);
3033 frbuf->flag = 0x01;
3034 }
3035
3036 return err;
3037}
3038
3039static int fr_send (sdla_t* card, int dlci, unsigned char attr, int len,
3040 void *buf)
3041{
3042 fr_mbox_t* mbox = card->mbox + 0x800;
3043 int retry = MAX_CMD_RETRY;
3044 int err;
3045
3046 do
3047 {
3048 mbox->cmd.dlci = dlci;
3049 mbox->cmd.attr = attr;
3050 mbox->cmd.length = len;
3051 mbox->cmd.command = FR_WRITE;
3052 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
3053 } while (err && retry-- && fr_event(card, err, mbox));
3054
3055 if (!err) {
3056 fr_tx_buf_ctl_t* frbuf;
3057
3058 if(card->hw.type == SDLA_S514)
3059 frbuf = (void*)(*(unsigned long*)mbox->data +
3060 card->hw.dpmbase);
3061 else
3062 frbuf = (void*)(*(unsigned long*)mbox->data -
3063 FR_MB_VECTOR + card->hw.dpmbase);
3064
3065 sdla_poke(&card->hw, frbuf->offset, buf, len);
3066 frbuf->flag = 0x01;
3067 }
3068
3069 return err;
3070}
3071
3072
3073/****** Firmware Asynchronous Event Handlers ********************************/
3074
3075/*============================================================================
3076 * Main asynchronous event/error handler.
3077 * 	This routine is called whenever a firmware command returns a non-zero
3078 * 	return code.
3079 *
3080 * Return zero if previous command has to be cancelled.
3081 */
3082static int fr_event (sdla_t *card, int event, fr_mbox_t* mbox)
3083{
3084 fr508_flags_t* flags = card->flags;
3085 char *ptr = &flags->iflag;
3086 int i;
3087
3088 switch (event) {
3089
3090 case FRRES_MODEM_FAILURE:
3091 return fr_modem_failure(card, mbox);
3092
3093 case FRRES_CHANNEL_DOWN: {
3094 struct net_device *dev;
3095
3096 /* Remove all routes from associated DLCI's */
3097 for (dev = card->wandev.dev; dev;
3098 dev = *((struct net_device **)dev->priv)) {
3099 fr_channel_t *chan = dev->priv;
3100 if (chan->route_flag == ROUTE_ADDED) {
3101 chan->route_flag = REMOVE_ROUTE;
3102 }
3103
3104 if (chan->inarp == INARP_CONFIGURED) {
3105 chan->inarp = INARP_REQUEST;
3106 }
3107
3108 /* If the link becomes disconnected then,
3109 * all channels will be disconnected
3110 * as well.
3111 */
3112 set_chan_state(dev,WAN_DISCONNECTED);
3113 }
3114
3115 wanpipe_set_state(card, WAN_DISCONNECTED);
3116 return 1;
3117 }
3118
3119 case FRRES_CHANNEL_UP: {
3120 struct net_device *dev;
3121
3122 /* FIXME: Only startup devices that are on the list */
3123
3124 for (dev = card->wandev.dev; dev;
3125 dev = *((struct net_device **)dev->priv)) {
3126
3127 set_chan_state(dev,WAN_CONNECTED);
3128 }
3129
3130 wanpipe_set_state(card, WAN_CONNECTED);
3131 return 1;
3132 }
3133
3134 case FRRES_DLCI_CHANGE:
3135 return fr_dlci_change(card, mbox);
3136
3137 case FRRES_DLCI_MISMATCH:
3138 printk(KERN_INFO "%s: DLCI list mismatch!\n",
3139 card->devname);
3140 return 1;
3141
3142 case CMD_TIMEOUT:
3143 printk(KERN_ERR "%s: command 0x%02X timed out!\n",
3144 card->devname, mbox->cmd.command);
3145 printk(KERN_INFO "%s: ID Bytes = ",card->devname);
3146 for(i = 0; i < 8; i ++)
3147 printk(KERN_INFO "0x%02X ", *(ptr + 0x18 + i));
3148 printk(KERN_INFO "\n");
3149
3150 break;
3151
3152 case FRRES_DLCI_INACTIVE:
3153 break;
3154
3155 case FRRES_CIR_OVERFLOW:
3156 break;
3157
3158 case FRRES_BUFFER_OVERFLOW:
3159 break;
3160
3161 default:
3162 printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n"
3163 , card->devname, mbox->cmd.command, event);
3164 }
3165
3166 return 0;
3167}
3168
3169/*============================================================================
3170 * Handle modem error.
3171 *
3172 * Return zero if previous command has to be cancelled.
3173 */
3174static int fr_modem_failure (sdla_t *card, fr_mbox_t* mbox)
3175{
3176 printk(KERN_INFO "%s: physical link down! (modem error 0x%02X)\n",
3177 card->devname, mbox->data[0]);
3178
3179 switch (mbox->cmd.command){
3180 case FR_WRITE:
3181
3182 case FR_READ:
3183 return 0;
3184 }
3185
3186 return 1;
3187}
3188
3189/*============================================================================
3190 * Handle DLCI status change.
3191 *
3192 * Return zero if previous command has to be cancelled.
3193 */
3194static int fr_dlci_change (sdla_t *card, fr_mbox_t* mbox)
3195{
3196 dlci_status_t* status = (void*)mbox->data;
3197 int cnt = mbox->cmd.length / sizeof(dlci_status_t);
3198 fr_channel_t *chan;
3199 struct net_device* dev2;
3200
3201
3202 for (; cnt; --cnt, ++status) {
3203
3204 unsigned short dlci= status->dlci;
3205 struct net_device* dev = find_channel(card, dlci);
3206
3207 if (dev == NULL){
3208 printk(KERN_INFO
3209 "%s: CPE contains unconfigured DLCI= %d\n",
3210 card->devname, dlci);
3211
3212 printk(KERN_INFO
3213 "%s: unconfigured DLCI %d reported by network\n"
3214 , card->devname, dlci);
3215
3216 }else{
3217 if (status->state == FR_LINK_INOPER) {
3218 printk(KERN_INFO
3219 "%s: DLCI %u is inactive!\n",
3220 card->devname, dlci);
3221
3222 if (dev && netif_running(dev))
3223 set_chan_state(dev, WAN_DISCONNECTED);
3224 }
3225
3226 if (status->state & FR_DLCI_DELETED) {
3227
3228 printk(KERN_INFO
3229 "%s: DLCI %u has been deleted!\n",
3230 card->devname, dlci);
3231
3232 if (dev && netif_running(dev)){
3233
3234 fr_channel_t *chan = dev->priv;
3235
3236 if (chan->route_flag == ROUTE_ADDED) {
3237 chan->route_flag = REMOVE_ROUTE;
3238 /* The state change will trigger
3239 * the fr polling routine */
3240 }
3241
3242 if (chan->inarp == INARP_CONFIGURED) {
3243 chan->inarp = INARP_REQUEST;
3244 }
3245
3246 set_chan_state(dev, WAN_DISCONNECTED);
3247 }
3248
3249 } else if (status->state & FR_DLCI_ACTIVE) {
3250
3251 chan = dev->priv;
3252
3253 /* This flag is used for configuring specific
3254 DLCI(s) when they become active.
3255 */
3256 chan->dlci_configured = DLCI_CONFIG_PENDING;
3257
3258 set_chan_state(dev, WAN_CONNECTED);
3259
3260 }
3261 }
3262 }
3263
3264 for (dev2 = card->wandev.dev; dev2;
3265 dev2 = *((struct net_device **)dev2->priv)){
3266
3267 chan = dev2->priv;
3268
3269 if (chan->dlci_configured == DLCI_CONFIG_PENDING) {
3270 if (fr_init_dlci(card, chan)){
3271 return 1;
3272 }
3273 }
3274
3275 }
3276 return 1;
3277}
3278
3279
3280static int fr_init_dlci (sdla_t *card, fr_channel_t *chan)
3281{
3282 fr_dlc_conf_t cfg;
3283
3284 memset(&cfg, 0, sizeof(cfg));
3285
3286 if ( chan->cir_status == CIR_DISABLED) {
3287
3288 cfg.cir_fwd = cfg.cir_bwd = 16;
3289 cfg.bc_fwd = cfg.bc_bwd = 16;
3290 cfg.conf_flags = 0x0001;
3291
3292 }else if (chan->cir_status == CIR_ENABLED) {
3293
3294 cfg.cir_fwd = cfg.cir_bwd = chan->cir;
3295 cfg.bc_fwd = cfg.bc_bwd = chan->bc;
3296 cfg.be_fwd = cfg.be_bwd = chan->be;
3297 cfg.conf_flags = 0x0000;
3298 }
3299
3300 if (fr_dlci_configure( card, &cfg , chan->dlci)){
3301 printk(KERN_INFO
3302 "%s: DLCI Configure failed for %d\n",
3303 card->devname, chan->dlci);
3304 return 1;
3305 }
3306
3307 chan->dlci_configured = DLCI_CONFIGURED;
3308
3309 /* Read the interface byte mapping into the channel
3310 * structure.
3311 */
3312 read_DLCI_IB_mapping( card, chan );
3313
3314 return 0;
3315}
3316/******* Miscellaneous ******************************************************/
3317
3318/*============================================================================
3319 * Update channel state.
3320 */
3321static int update_chan_state(struct net_device* dev)
3322{
3323 fr_channel_t* chan = dev->priv;
3324 sdla_t* card = chan->card;
3325 fr_mbox_t* mbox = card->mbox;
3326 int retry = MAX_CMD_RETRY;
3327 int err;
3328
3329 do
3330 {
3331 mbox->cmd.command = FR_LIST_ACTIVE_DLCI;
3332 mbox->cmd.length = 0;
3333 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
3334 } while (err && retry-- && fr_event(card, err, mbox));
3335
3336 if (!err) {
3337
3338 unsigned short* list = (void*)mbox->data;
3339 int cnt = mbox->cmd.length / sizeof(short);
3340
3341 err=1;
3342
3343 for (; cnt; --cnt, ++list) {
3344
3345 if (*list == chan->dlci) {
3346 set_chan_state(dev, WAN_CONNECTED);
3347
3348
3349 /* May 23 2000. NC
3350 * When a dlci is added or restarted,
3351 * the dlci_int_interface pointer must
3352 * be reinitialized. */
3353 if (!chan->dlci_int_interface){
3354 err=fr_init_dlci (card,chan);
3355 }
3356 break;
3357 }
3358 }
3359 }
3360
3361 return err;
3362}
3363
3364/*============================================================================
3365 * Set channel state.
3366 */
3367static void set_chan_state(struct net_device* dev, int state)
3368{
3369 fr_channel_t* chan = dev->priv;
3370 sdla_t* card = chan->card;
3371
3372 if (chan->common.state != state) {
3373
3374 switch (state) {
3375
3376 case WAN_CONNECTED:
3377 printk(KERN_INFO
3378 "%s: Interface %s: DLCI %d connected\n",
3379 card->devname, dev->name, chan->dlci);
3380
3381			/* If the interface was previously down,
3382 * bring it up, since the channel is active */
3383
3384 trigger_fr_poll (dev);
3385 trigger_fr_arp (dev);
3386 break;
3387
3388 case WAN_CONNECTING:
3389 printk(KERN_INFO
3390 "%s: Interface %s: DLCI %d connecting\n",
3391 card->devname, dev->name, chan->dlci);
3392 break;
3393
3394 case WAN_DISCONNECTED:
3395 printk (KERN_INFO
3396 "%s: Interface %s: DLCI %d disconnected!\n",
3397 card->devname, dev->name, chan->dlci);
3398
3399 /* If the interface is up, bring it down,
3400 * since the channel is now disconnected */
3401 trigger_fr_poll (dev);
3402 break;
3403 }
3404
3405 chan->common.state = state;
3406 }
3407
3408 chan->state_tick = jiffies;
3409}
3410
3411/*============================================================================
3412 * Find network device by its channel number.
3413 *
3414 * We need this critical flag because we change
3415 * the dlci_to_dev_map outside the interrupt.
3416 *
3417 * NOTE: the del_if() function updates this array; it uses
3418 *       spin locks to avoid corruption.
3419 */
3420static struct net_device* find_channel(sdla_t* card, unsigned dlci)
3421{
3422 if(dlci > HIGHEST_VALID_DLCI)
3423 return NULL;
3424
3425 return(card->u.f.dlci_to_dev_map[dlci]);
3426}
3427
3428/*============================================================================
3429 * Check to see if a frame can be sent. If no transmit buffers available,
3430 * enable transmit interrupts.
3431 *
3432 * Return: 1 - Tx buffer(s) available
3433 * 0 - no buffers available
3434 */
3435static int is_tx_ready (sdla_t* card, fr_channel_t* chan)
3436{
3437 unsigned char sb;
3438
3439 if(card->hw.type == SDLA_S514)
3440 return 1;
3441
3442 sb = inb(card->hw.port);
3443 if (sb & 0x02)
3444 return 1;
3445
3446 return 0;
3447}
3448
3449/*============================================================================
3450 * Convert decimal string to unsigned integer.
3451 * If len != 0 then only 'len' characters of the string are converted.
3452 */
3453static unsigned int dec_to_uint (unsigned char* str, int len)
3454{
3455 unsigned val;
3456
3457 if (!len)
3458 len = strlen(str);
3459
3460 for (val = 0; len && is_digit(*str); ++str, --len)
3461 val = (val * 10) + (*str - (unsigned)'0');
3462
3463 return val;
3464}
3465
3466
3467
3468/*=============================================================================
3469 * Store a UDP management packet for later processing.
3470 */
3471
3472static int store_udp_mgmt_pkt(int udp_type, char udp_pkt_src, sdla_t* card,
3473 struct sk_buff *skb, int dlci)
3474{
3475 int udp_pkt_stored = 0;
3476
3477 struct net_device *dev = find_channel(card, dlci);
3478 fr_channel_t *chan;
3479
3480 if (!dev || !(chan=dev->priv))
3481 return 1;
3482
3483 if(!card->u.f.udp_pkt_lgth && (skb->len <= MAX_LGTH_UDP_MGNT_PKT)){
3484 card->u.f.udp_pkt_lgth = skb->len + chan->fr_header_len;
3485 card->u.f.udp_type = udp_type;
3486 card->u.f.udp_pkt_src = udp_pkt_src;
3487 card->u.f.udp_dlci = dlci;
3488 memcpy(card->u.f.udp_pkt_data, skb->data, skb->len);
3489 card->u.f.timer_int_enabled |= TMR_INT_ENABLED_UDP;
3490 udp_pkt_stored = 1;
3491
3492 }else{
3493 printk(KERN_INFO "ERROR: UDP packet not stored for DLCI %d\n",
3494 dlci);
3495 }
3496
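	/* The packet has either been copied into udp_pkt_data above or
	 * dropped; the skb itself is freed here in both cases. */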
3497 if(udp_pkt_src == UDP_PKT_FRM_STACK){
3498 dev_kfree_skb_any(skb);
3499 }else{
3500 dev_kfree_skb_any(skb);
3501 }
3502
3503 return(udp_pkt_stored);
3504}
3505
3506
3507/*==============================================================================
3508 * Process UDP call of type FPIPE8ND
3509 */
3510static int process_udp_mgmt_pkt(sdla_t* card)
3511{
3512
3513 int c_retry = MAX_CMD_RETRY;
3514 unsigned char *buf;
3515 unsigned char frames;
3516 unsigned int len;
3517 unsigned short buffer_length;
3518 struct sk_buff *new_skb;
3519 fr_mbox_t* mbox = card->mbox;
3520 int err;
3521 struct timeval tv;
3522 int udp_mgmt_req_valid = 1;
3523 struct net_device* dev;
3524 fr_channel_t* chan;
3525 fr_udp_pkt_t *fr_udp_pkt;
3526 unsigned short num_trc_els;
3527 fr_trc_el_t* ptr_trc_el;
3528 fr_trc_el_t trc_el;
3529 fpipemon_trc_t* fpipemon_trc;
3530
3531 char udp_pkt_src = card->u.f.udp_pkt_src;
3532 int dlci = card->u.f.udp_dlci;
3533
3534 /* Find network interface for this packet */
3535 dev = find_channel(card, dlci);
3536 if (!dev){
3537 card->u.f.udp_pkt_lgth = 0;
3538 return 1;
3539 }
3540 if ((chan = dev->priv) == NULL){
3541 card->u.f.udp_pkt_lgth = 0;
3542 return 1;
3543 }
3544
3545 /* If the UDP packet is from the network, we are going to have to
3546 transmit a response. Before doing so, we must check to see that
3547 we are not currently transmitting a frame (in 'if_send()') and
3548 that we are not already in a 'delayed transmit' state.
3549 */
3550 if(udp_pkt_src == UDP_PKT_FRM_NETWORK) {
3551 if (check_tx_status(card,dev)){
3552 card->u.f.udp_pkt_lgth = 0;
3553 return 1;
3554 }
3555 }
3556
3557 fr_udp_pkt = (fr_udp_pkt_t *)card->u.f.udp_pkt_data;
3558
3559 if(udp_pkt_src == UDP_PKT_FRM_NETWORK) {
3560
3561 switch(fr_udp_pkt->cblock.command) {
3562
3563 case FR_READ_MODEM_STATUS:
3564 case FR_READ_STATUS:
3565 case FPIPE_ROUTER_UP_TIME:
3566 case FR_READ_ERROR_STATS:
3567 case FPIPE_DRIVER_STAT_GEN:
3568 case FR_READ_STATISTICS:
3569 case FR_READ_ADD_DLC_STATS:
3570 case FR_READ_CONFIG:
3571 case FR_READ_CODE_VERSION:
3572 udp_mgmt_req_valid = 1;
3573 break;
3574 default:
3575 udp_mgmt_req_valid = 0;
3576 break;
3577 }
3578 }
3579
3580 if(!udp_mgmt_req_valid) {
3581 /* set length to 0 */
3582 fr_udp_pkt->cblock.length = 0;
3583 /* set return code */
3584 fr_udp_pkt->cblock.result = 0xCD;
3585
3586 chan->drvstats_gen.UDP_PIPE_mgmt_direction_err ++;
3587
3588 if (net_ratelimit()){
3589 printk(KERN_INFO
3590 "%s: Warning, Illegal UDP command attempted from network: %x\n",
3591 card->devname,fr_udp_pkt->cblock.command);
3592 }
3593
3594 } else {
3595
3596 switch(fr_udp_pkt->cblock.command) {
3597
3598 case FPIPE_ENABLE_TRACING:
3599 if(!card->TracingEnabled) {
3600 do {
3601 mbox->cmd.command = FR_SET_TRACE_CONFIG;
3602 mbox->cmd.length = 1;
3603 mbox->cmd.dlci = 0x00;
3604 mbox->data[0] = fr_udp_pkt->data[0] |
3605 RESET_TRC;
3606 err = sdla_exec(mbox) ?
3607 mbox->cmd.result : CMD_TIMEOUT;
3608 } while (err && c_retry-- && fr_event(card, err,
3609 mbox));
3610
3611 if(err) {
3612 card->TracingEnabled = 0;
3613 /* set the return code */
3614 fr_udp_pkt->cblock.result =
3615 mbox->cmd.result;
3616 mbox->cmd.length = 0;
3617 break;
3618 }
3619
3620 sdla_peek(&card->hw, NO_TRC_ELEMENTS_OFF,
3621 &num_trc_els, 2);
3622 sdla_peek(&card->hw, BASE_TRC_ELEMENTS_OFF,
3623 &card->u.f.trc_el_base, 4);
3624 card->u.f.curr_trc_el = card->u.f.trc_el_base;
3625 card->u.f.trc_el_last = card->u.f.curr_trc_el +
3626 ((num_trc_els - 1) *
3627 sizeof(fr_trc_el_t));
3628
3629 /* Calculate the maximum trace data area in */
3630 /* the UDP packet */
3631 card->u.f.trc_bfr_space=(MAX_LGTH_UDP_MGNT_PKT -
3632 //sizeof(fr_encap_hdr_t) -
3633 sizeof(ip_pkt_t) -
3634 sizeof(udp_pkt_t) -
3635 sizeof(wp_mgmt_t) -
3636 sizeof(cblock_t));
3637
3638 /* set return code */
3639 fr_udp_pkt->cblock.result = 0;
3640
3641 } else {
3642 /* set return code to line trace already
3643 enabled */
3644 fr_udp_pkt->cblock.result = 1;
3645 }
3646
3647 mbox->cmd.length = 0;
3648 card->TracingEnabled = 1;
3649 break;
3650
3651
3652 case FPIPE_DISABLE_TRACING:
3653 if(card->TracingEnabled) {
3654
3655 do {
3656 mbox->cmd.command = FR_SET_TRACE_CONFIG;
3657 mbox->cmd.length = 1;
3658 mbox->cmd.dlci = 0x00;
3659 mbox->data[0] = ~ACTIVATE_TRC;
3660 err = sdla_exec(mbox) ?
3661 mbox->cmd.result : CMD_TIMEOUT;
3662 } while (err && c_retry-- && fr_event(card, err, mbox));
3663 }
3664
3665 /* set return code */
3666 fr_udp_pkt->cblock.result = 0;
3667 mbox->cmd.length = 0;
3668 card->TracingEnabled = 0;
3669 break;
3670
3671 case FPIPE_GET_TRACE_INFO:
3672
3673 /* Line trace cannot be performed on the 502 */
3674 if(!card->TracingEnabled) {
3675 /* set return code */
3676 fr_udp_pkt->cblock.result = 1;
3677 mbox->cmd.length = 0;
3678 break;
3679 }
3680
3681 ptr_trc_el = (void *)card->u.f.curr_trc_el;
3682
3683 buffer_length = 0;
3684 fr_udp_pkt->data[0x00] = 0x00;
3685
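			/* Walk the circular trace-element table on the card, copying as
			 * many traced frames as fit into the UDP reply. Consumed elements
			 * are cleared; MORE_TRC_DATA is set if any remain. */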
3686 for(frames = 0; frames < MAX_FRMS_TRACED; frames ++) {
3687
3688 sdla_peek(&card->hw, (unsigned long)ptr_trc_el,
3689 (void *)&trc_el.flag,
3690 sizeof(fr_trc_el_t));
3691 if(trc_el.flag == 0x00) {
3692 break;
3693 }
3694 if((card->u.f.trc_bfr_space - buffer_length)
3695 < sizeof(fpipemon_trc_hdr_t)) {
3696 fr_udp_pkt->data[0x00] |= MORE_TRC_DATA;
3697 break;
3698 }
3699
3700 fpipemon_trc =
3701 (fpipemon_trc_t *)&fr_udp_pkt->data[buffer_length];
3702 fpipemon_trc->fpipemon_trc_hdr.status =
3703 trc_el.attr;
3704 fpipemon_trc->fpipemon_trc_hdr.tmstamp =
3705 trc_el.tmstamp;
3706 fpipemon_trc->fpipemon_trc_hdr.length =
3707 trc_el.length;
3708
3709 if(!trc_el.offset || !trc_el.length) {
3710
3711 fpipemon_trc->fpipemon_trc_hdr.data_passed = 0x00;
3712
3713 }else if((trc_el.length + sizeof(fpipemon_trc_hdr_t) + 1) >
3714 (card->u.f.trc_bfr_space - buffer_length)){
3715
3716 fpipemon_trc->fpipemon_trc_hdr.data_passed = 0x00;
3717 fr_udp_pkt->data[0x00] |= MORE_TRC_DATA;
3718
3719 }else {
3720 fpipemon_trc->fpipemon_trc_hdr.data_passed = 0x01;
3721 sdla_peek(&card->hw, trc_el.offset,
3722 fpipemon_trc->data,
3723 trc_el.length);
3724 }
3725
3726 trc_el.flag = 0x00;
3727 sdla_poke(&card->hw, (unsigned long)ptr_trc_el,
3728 &trc_el.flag, 1);
3729
3730 ptr_trc_el ++;
3731 if((void *)ptr_trc_el > card->u.f.trc_el_last)
3732 ptr_trc_el = (void*)card->u.f.trc_el_base;
3733
3734 buffer_length += sizeof(fpipemon_trc_hdr_t);
3735 if(fpipemon_trc->fpipemon_trc_hdr.data_passed) {
3736 buffer_length += trc_el.length;
3737 }
3738
3739 if(fr_udp_pkt->data[0x00] & MORE_TRC_DATA) {
3740 break;
3741 }
3742 }
3743
3744 if(frames == MAX_FRMS_TRACED) {
3745 fr_udp_pkt->data[0x00] |= MORE_TRC_DATA;
3746 }
3747
3748 card->u.f.curr_trc_el = (void *)ptr_trc_el;
3749
3750 /* set the total number of frames passed */
3751 fr_udp_pkt->data[0x00] |=
3752 ((frames << 1) & (MAX_FRMS_TRACED << 1));
3753
3754 /* set the data length and return code */
3755 fr_udp_pkt->cblock.length = mbox->cmd.length = buffer_length;
3756 fr_udp_pkt->cblock.result = 0;
3757 break;
3758
3759 case FPIPE_FT1_READ_STATUS:
3760 sdla_peek(&card->hw, 0xF020,
3761 &fr_udp_pkt->data[0x00] , 2);
3762 fr_udp_pkt->cblock.length = mbox->cmd.length = 2;
3763 fr_udp_pkt->cblock.result = 0;
3764 break;
3765
3766 case FPIPE_FLUSH_DRIVER_STATS:
3767 init_chan_statistics(chan);
3768 init_global_statistics(card);
3769 mbox->cmd.length = 0;
3770 break;
3771
3772 case FPIPE_ROUTER_UP_TIME:
3773 do_gettimeofday(&tv);
3774 chan->router_up_time = tv.tv_sec -
3775 chan->router_start_time;
3776 *(unsigned long *)&fr_udp_pkt->data =
3777 chan->router_up_time;
3778 mbox->cmd.length = fr_udp_pkt->cblock.length = 4;
3779 fr_udp_pkt->cblock.result = 0;
3780 break;
3781
3782 case FPIPE_DRIVER_STAT_IFSEND:
3783 memcpy(fr_udp_pkt->data,
3784 &chan->drvstats_if_send.if_send_entry,
3785 sizeof(if_send_stat_t));
3786 mbox->cmd.length = fr_udp_pkt->cblock.length =sizeof(if_send_stat_t);
3787 fr_udp_pkt->cblock.result = 0;
3788 break;
3789
3790 case FPIPE_DRIVER_STAT_INTR:
3791
3792 memcpy(fr_udp_pkt->data,
3793 &card->statistics.isr_entry,
3794 sizeof(global_stats_t));
3795
3796 memcpy(&fr_udp_pkt->data[sizeof(global_stats_t)],
3797 &chan->drvstats_rx_intr.rx_intr_no_socket,
3798 sizeof(rx_intr_stat_t));
3799
3800 mbox->cmd.length = fr_udp_pkt->cblock.length =
3801 sizeof(global_stats_t) +
3802 sizeof(rx_intr_stat_t);
3803 fr_udp_pkt->cblock.result = 0;
3804 break;
3805
3806 case FPIPE_DRIVER_STAT_GEN:
3807 memcpy(fr_udp_pkt->data,
3808 &chan->drvstats_gen.UDP_PIPE_mgmt_kmalloc_err,
3809 sizeof(pipe_mgmt_stat_t));
3810
3811 memcpy(&fr_udp_pkt->data[sizeof(pipe_mgmt_stat_t)],
3812 &card->statistics, sizeof(global_stats_t));
3813
3814			mbox->cmd.length = fr_udp_pkt->cblock.length = sizeof(pipe_mgmt_stat_t)+
3815						       sizeof(global_stats_t);
3816 fr_udp_pkt->cblock.result = 0;
3817 break;
3818
3819
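			/* FT1 monitor status requests are reference-counted in
			 * rCount: only the first enable and the last disable are
			 * passed on to the adapter (via udp_mgmt_dflt below);
			 * intermediate requests are acknowledged locally. */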
3820 case FR_FT1_STATUS_CTRL:
3821 if(fr_udp_pkt->data[0] == 1) {
3822 if(rCount++ != 0 ){
3823 fr_udp_pkt->cblock.result = 0;
3824 mbox->cmd.length = 1;
3825 break;
3826 }
3827 }
3828
3829 /* Disable FT1 MONITOR STATUS */
3830 if(fr_udp_pkt->data[0] == 0) {
3831 if( --rCount != 0) {
3832 fr_udp_pkt->cblock.result = 0;
3833 mbox->cmd.length = 1;
3834 break;
3835 }
3836 }
3837 goto udp_mgmt_dflt;
3838
3839
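			/* Any other command is forwarded to the adapter firmware
			 * unchanged, and the mailbox result and data are copied
			 * back into the UDP reply. */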
3840 default:
3841udp_mgmt_dflt:
3842 do {
3843 memcpy(&mbox->cmd,
3844 &fr_udp_pkt->cblock.command,
3845 sizeof(fr_cmd_t));
3846 if(mbox->cmd.length) {
3847 memcpy(&mbox->data,
3848 (char *)fr_udp_pkt->data,
3849 mbox->cmd.length);
3850 }
3851
3852 err = sdla_exec(mbox) ? mbox->cmd.result :
3853 CMD_TIMEOUT;
3854 } while (err && c_retry-- && fr_event(card, err, mbox));
3855
3856 if(!err)
3857 chan->drvstats_gen.
3858 UDP_PIPE_mgmt_adptr_cmnd_OK ++;
3859 else
3860 chan->drvstats_gen.
3861 UDP_PIPE_mgmt_adptr_cmnd_timeout ++;
3862
3863 /* copy the result back to our buffer */
3864 memcpy(&fr_udp_pkt->cblock.command,
3865 &mbox->cmd, sizeof(fr_cmd_t));
3866
3867 if(mbox->cmd.length) {
3868 memcpy(&fr_udp_pkt->data,
3869 &mbox->data, mbox->cmd.length);
3870 }
3871 }
3872 }
3873
3874 /* Fill UDP TTL */
3875 fr_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
3876 len = reply_udp(card->u.f.udp_pkt_data, mbox->cmd.length);
3877
3878 if(udp_pkt_src == UDP_PKT_FRM_NETWORK) {
3879
3880 chan->fr_header_len=2;
3881 chan->fr_header[0]=Q922_UI;
3882 chan->fr_header[1]=NLPID_IP;
3883
3884 err = fr_send_data_header(card, dlci, 0, len,
3885 card->u.f.udp_pkt_data,chan->fr_header_len);
3886 if (err){
3887 chan->drvstats_gen.UDP_PIPE_mgmt_adptr_send_passed ++;
3888 }else{
3889 chan->drvstats_gen.UDP_PIPE_mgmt_adptr_send_failed ++;
3890 }
3891
3892 } else {
3893 /* Allocate socket buffer */
3894 if((new_skb = dev_alloc_skb(len)) != NULL) {
3895
3896 /* copy data into new_skb */
3897 buf = skb_put(new_skb, len);
3898 memcpy(buf, card->u.f.udp_pkt_data, len);
3899
3900 chan->drvstats_gen.
3901 UDP_PIPE_mgmt_passed_to_stack ++;
3902 new_skb->dev = dev;
3903 new_skb->protocol = htons(ETH_P_IP);
3904 new_skb->mac.raw = new_skb->data;
3905 netif_rx(new_skb);
3906
3907 } else {
3908 chan->drvstats_gen.UDP_PIPE_mgmt_no_socket ++;
3909 printk(KERN_INFO
3910 "%s: UDP mgmt cmnd, no socket buffers available!\n",
3911 card->devname);
3912 }
3913 }
3914
3915 card->u.f.udp_pkt_lgth = 0;
3916
3917 return 1;
3918}
3919
3920/*==============================================================================
3921 * Send Inverse ARP Request
3922 */
3923
3924int send_inarp_request(sdla_t *card, struct net_device *dev)
3925{
3926 int err=0;
3927
3928 arphdr_1490_t *ArpPacket;
3929 arphdr_fr_t *arphdr;
3930 fr_channel_t *chan = dev->priv;
3931 struct in_device *in_dev;
3932
3933 in_dev = dev->ip_ptr;
3934
3935 if(in_dev != NULL ) {
3936
3937		ArpPacket = kmalloc(sizeof(arphdr_1490_t) + sizeof(arphdr_fr_t), GFP_ATOMIC);
		if (ArpPacket == NULL)
			return 1;	/* allocation failed; bail out as in the no-IP case */
3938 /* SNAP Header indicating ARP */
3939 ArpPacket->control = 0x03;
3940 ArpPacket->pad = 0x00;
3941 ArpPacket->NLPID = 0x80;
3942 ArpPacket->OUI[0] = 0;
3943 ArpPacket->OUI[1] = 0;
3944 ArpPacket->OUI[2] = 0;
3945 ArpPacket->PID = 0x0608;
3946
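		/* Note: the 16-bit constants above and below (SNAP PID 0x0608,
		 * ar_hrd 0x0F00, ar_pro 0x0008) are the standard codes 0x0806
		 * (ARP), 0x000F (Frame Relay DLCI) and 0x0800 (IP) stored
		 * pre-swapped into network byte order for little-endian hosts;
		 * only ar_op is converted with htons(). */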
3947 arphdr = (arphdr_fr_t *)(ArpPacket + 1); // Go to ARP Packet
3948
3949 /* InARP request */
3950 arphdr->ar_hrd = 0x0F00; /* Frame Relay HW type */
3951 arphdr->ar_pro = 0x0008; /* IP Protocol */
3952 arphdr->ar_hln = 2; /* HW addr length */
3953 arphdr->ar_pln = 4; /* IP addr length */
3954 arphdr->ar_op = htons(0x08); /* InARP Request */
3955 arphdr->ar_sha = 0; /* src HW DLCI - Doesn't matter */
3956 if(in_dev->ifa_list != NULL)
3957			arphdr->ar_sip = in_dev->ifa_list->ifa_local;	/* Local Address */
		else
3958 arphdr->ar_sip = 0;
3959 arphdr->ar_tha = 0; /* dst HW DLCI - Doesn't matter */
3960 arphdr->ar_tip = 0; /* Remote Address -- what we want */
3961
3962 err = fr_send(card, chan->dlci, 0, sizeof(arphdr_1490_t) + sizeof(arphdr_fr_t),
3963 (void *)ArpPacket);
3964
3965 if (!err){
3966 printk(KERN_INFO "\n%s: Sending InARP request on DLCI %d.\n",
3967 card->devname, chan->dlci);
3968 clear_bit(ARP_CRIT,&card->wandev.critical);
3969 }
3970
3971 kfree(ArpPacket);
3972 }else{
3973 printk(KERN_INFO "%s: INARP ERROR: %s doesn't have a local IP address!\n",
3974 card->devname,dev->name);
3975 return 1;
3976 }
3977
3978 return 0;
3979}
3980
3981
3982/*==============================================================================
3983 * Check packet for ARP Type
3984 */
3985
3986int is_arp(void *buf)
3987{
3988 arphdr_1490_t *arphdr = (arphdr_1490_t *)buf;
3989
3990 if (arphdr->pad == 0x00 &&
3991 arphdr->NLPID == 0x80 &&
3992 arphdr->PID == 0x0608)
3993 return 1;
3994 else return 0;
3995}
3996
3997/*==============================================================================
3998 * Process ARP Packet Type
3999 */
4000
4001int process_ARP(arphdr_1490_t *ArpPacket, sdla_t *card, struct net_device* dev)
4002{
4003
4004
4005 arphdr_fr_t *arphdr = (arphdr_fr_t *)(ArpPacket + 1); /* Skip header */
4006 fr_rx_buf_ctl_t* frbuf = card->rxmb;
4007 struct in_device *in_dev;
4008 fr_channel_t *chan = dev->priv;
4009
4010 /* Before we transmit ARP packet, we must check
4011 * to see that we are not currently transmitting a
4012 * frame (in 'if_send()') and that we are not
4013 * already in a 'delayed transmit' state. */
4014 if (check_tx_status(card,dev)){
4015 if (net_ratelimit()){
4016			printk(KERN_INFO "%s: Disabling communication to process ARP\n",
4017 card->devname);
4018 }
4019 set_bit(ARP_CRIT,&card->wandev.critical);
4020 return 0;
4021 }
4022
4023 in_dev = dev->ip_ptr;
4024
4025 /* Check that IP addresses exist for our network address */
4026 if (in_dev == NULL || in_dev->ifa_list == NULL)
4027 return -1;
4028
4029 switch (ntohs(arphdr->ar_op)) {
4030
4031 case 0x08: // Inverse ARP request -- Send Reply, add route.
4032
4033 /* Check for valid Address */
4034 printk(KERN_INFO "%s: Recvd PtP addr -InArp Req: %u.%u.%u.%u\n",
4035 card->devname, NIPQUAD(arphdr->ar_sip));
4036
4037
4038 /* Check that the network address is the same as ours, only
4039		 * if the network mask is not 255.255.255.255. Otherwise
4040 * this check would not make sense */
4041
4042 if (in_dev->ifa_list->ifa_mask != 0xFFFFFFFF &&
4043 (in_dev->ifa_list->ifa_mask & arphdr->ar_sip) !=
4044 (in_dev->ifa_list->ifa_mask & in_dev->ifa_list->ifa_local)){
4045 printk(KERN_INFO
4046 "%s: Invalid PtP address. %u.%u.%u.%u InARP ignored.\n",
4047 card->devname,NIPQUAD(arphdr->ar_sip));
4048
4049 printk(KERN_INFO "%s: mask %u.%u.%u.%u\n",
4050 card->devname, NIPQUAD(in_dev->ifa_list->ifa_mask));
4051 printk(KERN_INFO "%s: local %u.%u.%u.%u\n",
4052 card->devname,NIPQUAD(in_dev->ifa_list->ifa_local));
4053 return -1;
4054 }
4055
4056 if (in_dev->ifa_list->ifa_local == arphdr->ar_sip){
4057 printk(KERN_INFO
4058 "%s: Local addr = PtP addr. InARP ignored.\n",
4059 card->devname);
4060 return -1;
4061 }
4062
4063 arphdr->ar_op = htons(0x09); /* InARP Reply */
4064
4065 /* Set addresses */
4066 arphdr->ar_tip = arphdr->ar_sip;
4067 arphdr->ar_sip = in_dev->ifa_list->ifa_local;
4068
4069 chan->ip_local = in_dev->ifa_list->ifa_local;
4070 chan->ip_remote = arphdr->ar_sip;
4071
4072 fr_send(card, frbuf->dlci, 0, frbuf->length, (void *)ArpPacket);
4073
4074 if (test_bit(ARP_CRIT,&card->wandev.critical)){
4075 if (net_ratelimit()){
4076 printk(KERN_INFO "%s: ARP Processed Enabling Communication!\n",
4077 card->devname);
4078 }
4079 }
4080 clear_bit(ARP_CRIT,&card->wandev.critical);
4081
4082 chan->ip_local = in_dev->ifa_list->ifa_local;
4083 chan->ip_remote = arphdr->ar_sip;
4084
4085 /* Add Route Flag */
4086 /* The route will be added in the polling routine so
4087 that it is not interrupt context. */
4088
4089 chan->route_flag = ADD_ROUTE;
4090 trigger_fr_poll (dev);
4091
4092 break;
4093
4094 case 0x09: // Inverse ARP reply
4095
4096 /* Check for valid Address */
4097 printk(KERN_INFO "%s: Recvd PtP addr %u.%u.%u.%u -InArp Reply\n",
4098 card->devname, NIPQUAD(arphdr->ar_sip));
4099
4100
4101 /* Compare network addresses, only if network mask
4102		 * is not 255.255.255.255. It would not make sense
4103 * to perform this test if the mask was all 1's */
4104
4105 if (in_dev->ifa_list->ifa_mask != 0xffffffff &&
4106 (in_dev->ifa_list->ifa_mask & arphdr->ar_sip) !=
4107 (in_dev->ifa_list->ifa_mask & in_dev->ifa_list->ifa_local)) {
4108
4109 printk(KERN_INFO "%s: Invalid PtP address. InARP ignored.\n",
4110 card->devname);
4111 return -1;
4112 }
4113
4114 /* Make sure that the received IP address is not
4115 * the same as our own local address */
4116 if (in_dev->ifa_list->ifa_local == arphdr->ar_sip) {
4117 printk(KERN_INFO "%s: Local addr = PtP addr. InARP ignored.\n",
4118 card->devname);
4119 return -1;
4120 }
4121
4122 chan->ip_local = in_dev->ifa_list->ifa_local;
4123 chan->ip_remote = arphdr->ar_sip;
4124
4125 /* Add Route Flag */
4126 /* The route will be added in the polling routine so
4127 that it is not interrupt context. */
4128
4129 chan->route_flag = ADD_ROUTE;
4130 chan->inarp = INARP_CONFIGURED;
4131 trigger_fr_poll(dev);
4132
4133 break;
4134 default:
4135 break; // ARP's and RARP's -- Shouldn't happen.
4136 }
4137
4138 return 0;
4139}
4140
4141
4142/*============================================================
4143 * trigger_fr_arp
4144 *
4145 * Description:
4146 *	Add an fr_arp() task into an ARP
4147 * timer handler for a specific dlci/interface.
4148 * This will kick the fr_arp() routine
4149 * within the specified time interval.
4150 *
4151 * Usage:
4152 * This timer is used to send ARP requests at
4153 * certain time intervals.
4154 * Called by an interrupt to request an action
4155 * at a later date.
4156 */
4157
4158static void trigger_fr_arp(struct net_device *dev)
4159{
4160 fr_channel_t* chan = dev->priv;
4161
4162 mod_timer(&chan->fr_arp_timer, jiffies + chan->inarp_interval * HZ);
4163 return;
4164}
4165
4166
4167
4168/*==============================================================================
4169 * ARP Request Action
4170 *
4171 * This function is called by the timer interrupt to send an ARP request
4172 * to the remote end.
4173 */
4174
4175static void fr_arp (unsigned long data)
4176{
4177 struct net_device *dev = (struct net_device *)data;
4178 fr_channel_t *chan = dev->priv;
4179 volatile sdla_t *card = chan->card;
4180 fr508_flags_t* flags = card->flags;
4181
4182	/* Send ARP packets for all devs until
4183 * ARP state changes to CONFIGURED */
4184
4185 if (chan->inarp == INARP_REQUEST &&
4186 chan->common.state == WAN_CONNECTED &&
4187 card->wandev.state == WAN_CONNECTED){
4188 set_bit(0,&chan->inarp_ready);
4189 card->u.f.timer_int_enabled |= TMR_INT_ENABLED_ARP;
4190 flags->imask |= FR_INTR_TIMER;
4191 }
4192
4193 return;
4194}
4195
4196
4197/*==============================================================================
4198 * Perform the Interrupt Test by running the READ_CODE_VERSION command
4199 * MAX_INTR_TEST_COUNTER times.
4200 */
4201static int intr_test( sdla_t* card )
4202{
4203 fr_mbox_t* mb = card->mbox;
4204 int err,i;
4205
4206 err = fr_set_intr_mode(card, FR_INTR_READY, card->wandev.mtu, 0 );
4207
4208 if (err == CMD_OK) {
4209
4210 for ( i = 0; i < MAX_INTR_TEST_COUNTER; i++ ) {
4211 /* Run command READ_CODE_VERSION */
4212 mb->cmd.length = 0;
4213 mb->cmd.command = FR_READ_CODE_VERSION;
4214 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
4215 if (err != CMD_OK)
4216 fr_event(card, err, mb);
4217 }
4218
4219 } else {
4220 return err;
4221 }
4222
4223 err = fr_set_intr_mode( card, 0, card->wandev.mtu, 0 );
4224
4225 if( err != CMD_OK )
4226 return err;
4227
4228 return 0;
4229}
4230
4231/*==============================================================================
4232 * Determine what type of UDP call it is. FPIPE8ND ?
4233 */
4234static int udp_pkt_type( struct sk_buff *skb, sdla_t* card )
4235{
4236 fr_udp_pkt_t *fr_udp_pkt = (fr_udp_pkt_t *)skb->data;
4237
4238 /* Quick HACK */
4239
4240
4241 if((fr_udp_pkt->ip_pkt.protocol == UDPMGMT_UDP_PROTOCOL) &&
4242 (fr_udp_pkt->ip_pkt.ver_inet_hdr_length == 0x45) &&
4243 (fr_udp_pkt->udp_pkt.udp_dst_port ==
4244 ntohs(card->wandev.udp_port)) &&
4245 (fr_udp_pkt->wp_mgmt.request_reply ==
4246 UDPMGMT_REQUEST)) {
4247 if(!strncmp(fr_udp_pkt->wp_mgmt.signature,
4248 UDPMGMT_FPIPE_SIGNATURE, 8)){
4249 return UDP_FPIPE_TYPE;
4250 }
4251 }
4252 return UDP_INVALID_TYPE;
4253}
4254
4255
4256/*==============================================================================
4257 * Initializes the Statistics values in the fr_channel structure.
4258 */
4259void init_chan_statistics( fr_channel_t* chan)
4260{
4261 memset(&chan->drvstats_if_send.if_send_entry, 0,
4262 sizeof(if_send_stat_t));
4263 memset(&chan->drvstats_rx_intr.rx_intr_no_socket, 0,
4264 sizeof(rx_intr_stat_t));
4265 memset(&chan->drvstats_gen.UDP_PIPE_mgmt_kmalloc_err, 0,
4266 sizeof(pipe_mgmt_stat_t));
4267}
4268
4269/*==============================================================================
4270 * Initializes the Statistics values in the Sdla_t structure.
4271 */
4272void init_global_statistics( sdla_t* card )
4273{
4274	/* Initialize global statistics for a card */
4275 memset(&card->statistics.isr_entry, 0, sizeof(global_stats_t));
4276}
4277
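/*==============================================================================
 * Read the DLCI-to-IB address mapping from the adapter and record the host
 * pointer to this channel's DLCI interface area in chan->dlci_int_interface.
 * S514 cards use the returned address directly; S508 cards mask it into the
 * dual-port memory window.
 */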
4278static void read_DLCI_IB_mapping( sdla_t* card, fr_channel_t* chan )
4279{
4280 fr_mbox_t* mbox = card->mbox;
4281 int retry = MAX_CMD_RETRY;
4282 dlci_IB_mapping_t* result;
4283 int err, counter, found;
4284
4285 do {
4286 mbox->cmd.command = FR_READ_DLCI_IB_MAPPING;
4287 mbox->cmd.length = 0;
4288 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
4289 } while (err && retry-- && fr_event(card, err, mbox));
4290
4291 if( mbox->cmd.result != 0){
4292 printk(KERN_INFO "%s: Read DLCI IB Mapping failed\n",
4293 chan->name);
4294 }
4295
4296 counter = mbox->cmd.length / sizeof(dlci_IB_mapping_t);
4297 result = (void *)mbox->data;
4298
4299 found = 0;
4300 for (; counter; --counter, ++result) {
4301 if ( result->dlci == chan->dlci ) {
4302 chan->IB_addr = result->addr_value;
4303 if(card->hw.type == SDLA_S514){
4304 chan->dlci_int_interface =
4305 (void*)(card->hw.dpmbase +
4306 chan->IB_addr);
4307 }else{
4308 chan->dlci_int_interface =
4309 (void*)(card->hw.dpmbase +
4310 (chan->IB_addr & 0x00001FFF));
4311
4312 }
4313 found = 1;
4314 break;
4315 }
4316 }
4317 if (!found)
4318 printk( KERN_INFO "%s: DLCI %d not found by IB MAPPING cmd\n",
4319 card->devname, chan->dlci);
4320}
4321
4322
4323
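/*==============================================================================
 * Locking helpers: S508 (non-S514) cards take the card-wide wandev.lock with
 * interrupts disabled, while S514 cards use the dedicated if_send spinlock.
 */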
4324void s508_s514_lock(sdla_t *card, unsigned long *smp_flags)
4325{
4326 if (card->hw.type != SDLA_S514){
4327
4328 spin_lock_irqsave(&card->wandev.lock, *smp_flags);
4329 }else{
4330 spin_lock(&card->u.f.if_send_lock);
4331 }
4332 return;
4333}
4334
4335
4336void s508_s514_unlock(sdla_t *card, unsigned long *smp_flags)
4337{
4338 if (card->hw.type != SDLA_S514){
4339
4340 spin_unlock_irqrestore (&card->wandev.lock, *smp_flags);
4341 }else{
4342 spin_unlock(&card->u.f.if_send_lock);
4343 }
4344 return;
4345}
4346
4347
4348
4349/*----------------------------------------------------------------------
4350 RECEIVE INTERRUPT: BOTTOM HALF HANDLERS
4351 ----------------------------------------------------------------------*/
4352
4353
4354/*========================================================
4355 * bh_enqueue
4356 *
4357 * Description:
4358 * Insert a received packet into a circular
4359 * rx queue. This packet will be picked up
4360 * by fr_bh() and sent up the stack to the
4361 * user.
4362 *
4363 * Usage:
4364 * This function is called by rx interrupt,
4365 * in API mode.
4366 *
4367 */
4368
4369static int bh_enqueue(struct net_device *dev, struct sk_buff *skb)
4370{
4371 /* Check for full */
4372 fr_channel_t* chan = dev->priv;
4373 sdla_t *card = chan->card;
4374
4375
4376 if (atomic_read(&chan->bh_buff_used) == MAX_BH_BUFF){
4377 ++card->wandev.stats.rx_dropped;
4378 dev_kfree_skb_any(skb);
4379 return 1;
4380 }
4381
4382 ((bh_data_t *)&chan->bh_head[chan->bh_write])->skb = skb;
4383
4384 if (chan->bh_write == (MAX_BH_BUFF-1)){
4385 chan->bh_write=0;
4386 }else{
4387 ++chan->bh_write;
4388 }
4389
4390 atomic_inc(&chan->bh_buff_used);
4391
4392 return 0;
4393}
4394
4395
4396/*========================================================
4397 * trigger_fr_bh
4398 *
4399 * Description:
4400 * Kick the fr_bh() handler
4401 *
4402 * Usage:
4403 * rx interrupt calls this function during
4404 * the API mode.
4405 */
4406
4407static void trigger_fr_bh (fr_channel_t *chan)
4408{
4409 if (!test_and_set_bit(0,&chan->tq_working)){
4410 wanpipe_queue_work(&chan->common.wanpipe_work);
4411 }
4412}
4413
4414
4415/*========================================================
4416 * fr_bh
4417 *
4418 * Description:
4419 * Frame relay receive BH handler.
4420 * Dequeue data from the BH circular
4421 * buffer and pass it up the API sock.
4422 *
4423 * Rationale:
4424 *	This function is used to offload the
4425 * rx_interrupt during API operation mode.
4426 * The fr_bh() function executes for each
4427 * dlci/interface.
4428 *
4429 * Once receive interrupt copies data from the
4430 * card into an skb buffer, the skb buffer
4431 * is appended to a circular BH buffer.
4432 * Then the interrupt kicks fr_bh() to finish the
4433 * job at a later time (not within the interrupt).
4434 *
4435 * Usage:
4436 * Interrupts use this to defer a task to
4437 * a polling routine.
4438 *
4439 */
4440
4441static void fr_bh(struct net_device * dev)
4442{
4443 fr_channel_t* chan = dev->priv;
4444 sdla_t *card = chan->card;
4445 struct sk_buff *skb;
4446
4447 if (atomic_read(&chan->bh_buff_used) == 0){
4448 clear_bit(0, &chan->tq_working);
4449 return;
4450 }
4451
4452 while (atomic_read(&chan->bh_buff_used)){
4453
4454 if (chan->common.sk == NULL || chan->common.func == NULL){
4455 clear_bit(0, &chan->tq_working);
4456 return;
4457 }
4458
4459 skb = ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb;
4460
4461 if (skb != NULL){
4462
4463 if (chan->common.sk == NULL || chan->common.func == NULL){
4464 ++card->wandev.stats.rx_dropped;
4465 ++chan->ifstats.rx_dropped;
4466 dev_kfree_skb_any(skb);
4467 fr_bh_cleanup(dev);
4468 continue;
4469 }
4470
4471 if (chan->common.func(skb,dev,chan->common.sk) != 0){
4472 /* Sock full cannot send, queue us for
4473 * another try */
4474 atomic_set(&chan->common.receive_block,1);
4475 return;
4476 }else{
4477 fr_bh_cleanup(dev);
4478 }
4479 }else{
4480 fr_bh_cleanup(dev);
4481 }
4482 }
4483 clear_bit(0, &chan->tq_working);
4484
4485 return;
4486}
4487
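/* Release the BH ring slot just consumed by fr_bh(): clear the skb pointer,
 * advance the circular read index (wrapping at MAX_BH_BUFF) and decrement
 * the in-use count. */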
4488static int fr_bh_cleanup(struct net_device *dev)
4489{
4490 fr_channel_t* chan = dev->priv;
4491
4492 ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb = NULL;
4493
4494 if (chan->bh_read == (MAX_BH_BUFF-1)){
4495 chan->bh_read=0;
4496 }else{
4497 ++chan->bh_read;
4498 }
4499
4500 atomic_dec(&chan->bh_buff_used);
4501 return 0;
4502}
4503
4504
4505/*----------------------------------------------------------------------
4506 POLL BH HANDLERS AND KICK ROUTINES
4507 ----------------------------------------------------------------------*/
4508
4509/*============================================================
4510 * trigger_fr_poll
4511 *
4512 * Description:
4513 * Add a fr_poll() task into a tq_scheduler bh handler
4514 * for a specific dlci/interface. This will kick
4515 * the fr_poll() routine at a later time.
4516 *
4517 * Usage:
4518 *	Interrupts use this to defer a task to
4519 * a polling routine.
4520 *
4521 */
4522static void trigger_fr_poll(struct net_device *dev)
4523{
4524 fr_channel_t* chan = dev->priv;
4525 schedule_work(&chan->fr_poll_work);
4526 return;
4527}
4528
4529
4530/*============================================================
4531 * fr_poll
4532 *
4533 * Rationale:
4534 * We cannot manipulate the routing tables, or
4535 *	IP addresses within the interrupt. Therefore
4536 *	we must perform such actions outside an interrupt
4537 * at a later time.
4538 *
4539 * Description:
4540 * Frame relay polling routine, responsible for
4541 * shutting down interfaces upon disconnect
4542 * and adding/removing routes.
4543 *
4544 * Usage:
4545 * This function is executed for each frame relay
4546 * dlci/interface through a tq_schedule bottom half.
4547 *
4548 * trigger_fr_poll() function is used to kick
4549 * the fr_poll routine.
4550 */
4551
4552static void fr_poll(struct net_device *dev)
4553{
4554
4555 fr_channel_t* chan;
4556 sdla_t *card;
4557 u8 check_gateway=0;
4558
4559 if (!dev || (chan = dev->priv) == NULL)
4560 return;
4561
4562 card = chan->card;
4563
4564	/* (Re)Configuration is in progress, stop what you are
4565 * doing and get out */
4566 if (test_bit(PERI_CRIT,&card->wandev.critical)){
4567 return;
4568 }
4569
4570 switch (chan->common.state){
4571
4572 case WAN_DISCONNECTED:
4573
4574 if (test_bit(DYN_OPT_ON,&chan->interface_down) &&
4575 !test_bit(DEV_DOWN, &chan->interface_down) &&
4576 dev->flags&IFF_UP){
4577
4578 printk(KERN_INFO "%s: Interface %s is Down.\n",
4579 card->devname,dev->name);
4580 change_dev_flags(dev,dev->flags&~IFF_UP);
4581 set_bit(DEV_DOWN, &chan->interface_down);
4582 chan->route_flag = NO_ROUTE;
4583
4584 }else{
4585 if (chan->inarp != INARP_NONE)
4586 process_route(dev);
4587 }
4588 break;
4589
4590 case WAN_CONNECTED:
4591
4592 if (test_bit(DYN_OPT_ON,&chan->interface_down) &&
4593 test_bit(DEV_DOWN, &chan->interface_down) &&
4594 !(dev->flags&IFF_UP)){
4595
4596 printk(KERN_INFO "%s: Interface %s is Up.\n",
4597 card->devname,dev->name);
4598
4599 change_dev_flags(dev,dev->flags|IFF_UP);
4600 clear_bit(DEV_DOWN, &chan->interface_down);
4601 check_gateway=1;
4602 }
4603
4604 if (chan->inarp != INARP_NONE){
4605 process_route(dev);
4606 check_gateway=1;
4607 }
4608
4609 if (chan->gateway && check_gateway)
4610 add_gateway(card,dev);
4611
4612 break;
4613
4614 }
4615
4616 return;
4617}
4618
4619/*==============================================================
4620 * check_tx_status
4621 *
4622 * Rationale:
4623 * We cannot transmit from an interrupt while
4624 * the if_send is transmitting data. Therefore,
4625 * we must check whether the tx buffers are
4626 *	being used, before we transmit from an
4627 * interrupt.
4628 *
4629 * Description:
4630 * Checks whether it's safe to use the transmit
4631 * buffers.
4632 *
4633 * Usage:
4634 * ARP and UDP handling routines use this function
4635 * because, they need to transmit data during
4636 * an interrupt.
4637 */
4638
4639static int check_tx_status(sdla_t *card, struct net_device *dev)
4640{
4641
4642 if (card->hw.type == SDLA_S514){
4643 if (test_bit(SEND_CRIT, (void*)&card->wandev.critical) ||
4644 test_bit(SEND_TXIRQ_CRIT, (void*)&card->wandev.critical)) {
4645 return 1;
4646 }
4647 }
4648
4649 if (netif_queue_stopped(dev) || (card->u.f.tx_interrupts_pending))
4650 return 1;
4651
4652 return 0;
4653}
4654
4655/*===============================================================
4656 * move_dev_to_next
4657 *
4658 * Description:
4659 * Move the dev pointer to the next location in the
4660 * link list. Check if we are at the end of the
4661 *	list, if so start from the beginning.
4662 *
4663 * Usage:
4664 * Timer interrupt uses this function to efficiently
4665 * step through the devices that need to send ARP data.
4666 *
4667 */
4668
4669struct net_device *move_dev_to_next(sdla_t *card, struct net_device *dev)
4670{
4671 if (card->wandev.new_if_cnt != 1){
4672 if (!*((struct net_device **)dev->priv))
4673 return card->wandev.dev;
4674 else
4675 return *((struct net_device **)dev->priv);
4676 }
4677 return dev;
4678}
4679
4680/*==============================================================
4681 * trigger_config_fr
4682 *
4683 * Rationale:
4684 *	All commands must be performed inside of an
4685 *	interrupt.
4686 *
4687 * Description:
4688 * 	Kick the config_fr() routine through the
4689 * timer interrupt.
4690 */
4691
4692
4693static void trigger_config_fr (sdla_t *card)
4694{
4695 fr508_flags_t* flags = card->flags;
4696
4697 card->u.f.timer_int_enabled |= TMR_INT_ENABLED_CONFIG;
4698 flags->imask |= FR_INTR_TIMER;
4699}
4700
4701
4702/*==============================================================
4703 * config_fr
4704 *
4705 * Rationale:
4706 *	All commands must be performed inside of an
4707 *	interrupt.
4708 *
4709 * Description:
4710 * Configure a DLCI. This function is executed
4711 * by a timer_interrupt. The if_open() function
4712 * triggers it.
4713 *
4714 * Usage:
4715 * new_if() collects all data necessary to
4716 * configure the DLCI. It sets the chan->dlci_ready
4717 * bit. When the if_open() function is executed
4718 * it checks this bit, and if its set it triggers
4719 * the timer interrupt to execute the config_fr()
4720 * function.
4721 */
4722
4723static void config_fr (sdla_t *card)
4724{
4725 struct net_device *dev;
4726 fr_channel_t *chan;
4727
4728 for (dev = card->wandev.dev; dev;
4729 dev = *((struct net_device **)dev->priv)) {
4730
4731 if ((chan=dev->priv) == NULL)
4732 continue;
4733
4734 if (!test_bit(0,&chan->config_dlci))
4735 continue;
4736
4737 clear_bit(0,&chan->config_dlci);
4738
4739 /* If signalling is set to NO, then setup
4740 * DLCI addresses right away. Don't have to wait for
4741 * link to connect.
4742 */
4743 if (card->wandev.signalling == WANOPT_NO){
4744 printk(KERN_INFO "%s: Signalling set to NO: Mapping DLCI's\n",
4745 card->wandev.name);
4746 if (fr_init_dlci(card,chan)){
4747 printk(KERN_INFO "%s: ERROR: Failed to configure DLCI %i !\n",
4748 card->devname, chan->dlci);
4749 return;
4750 }
4751 }
4752
4753 if (card->wandev.station == WANOPT_CPE) {
4754
4755 update_chan_state(dev);
4756
4757 /* CPE: issue full status enquiry */
4758 fr_issue_isf(card, FR_ISF_FSE);
4759
4760 } else {
4761 /* FR switch: activate DLCI(s) */
4762
4763 /* For Switch emulation we have to ADD and ACTIVATE
4764			 * the DLCI(s) that were configured with the
4765			 * SET_DLCI_CONFIGURATION command. Add and Activate will
4766			 * fail if the DLCI specified is not included in the list.
4767			 *
4768			 * Also, if_open() is called once for each interface, but
4769			 * it does not get in here for every interface. So
4770			 * we have to pass the entire list of DLCI(s) to the add/
4771			 * activate routines.
4772 */
4773
4774 if (!check_dlci_config (card, chan)){
4775 fr_add_dlci(card, chan->dlci);
4776 fr_activate_dlci(card, chan->dlci);
4777 }
4778 }
4779
4780 card->u.f.dlci_to_dev_map[chan->dlci] = dev;
4781 }
4782 return;
4783}
4784
4785
4786/*==============================================================
4787 * trigger_unconfig_fr
4788 *
4789 * Rationale:
4790 * All commands must be executed during an interrupt.
4791 *
4792 * Description:
4793 *	Trigger the unconfig_fr() function through
4794 * the timer interrupt.
4795 *
4796 */
4797
4798static void trigger_unconfig_fr(struct net_device *dev)
4799{
4800 fr_channel_t *chan = dev->priv;
4801 volatile sdla_t *card = chan->card;
4802 u32 timeout;
4803 fr508_flags_t* flags = card->flags;
4804 int reset_critical=0;
4805
4806 if (test_bit(PERI_CRIT,(void*)&card->wandev.critical)){
4807 clear_bit(PERI_CRIT,(void*)&card->wandev.critical);
4808 reset_critical=1;
4809 }
4810
4811 /* run unconfig_dlci() function
4812	 * through the timer interrupt */
4813 set_bit(0,(void*)&chan->unconfig_dlci);
4814 card->u.f.timer_int_enabled |= TMR_INT_ENABLED_UNCONFIG;
4815 flags->imask |= FR_INTR_TIMER;
4816
4817 /* Wait for the command to complete */
4818 timeout = jiffies;
4819 for(;;) {
4820
4821 if(!(card->u.f.timer_int_enabled & TMR_INT_ENABLED_UNCONFIG))
4822 break;
4823
4824 if ((jiffies - timeout) > (1 * HZ)){
4825 card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UNCONFIG;
4826 printk(KERN_INFO "%s: Failed to delete DLCI %i\n",
4827 card->devname,chan->dlci);
4828 break;
4829 }
4830 }
4831
4832 if (reset_critical){
4833 set_bit(PERI_CRIT,(void*)&card->wandev.critical);
4834 }
4835}
4836
4837/*==============================================================
4838 * unconfig_fr
4839 *
4840 * Rationale:
4841 * All commands must be executed during an interrupt.
4842 *
4843 * Description:
4844 * Remove the dlci from firmware.
4845 *	This function is used in NODE shutdown.
4846 */
4847
4848static void unconfig_fr (sdla_t *card)
4849{
4850 struct net_device *dev;
4851 fr_channel_t *chan;
4852
4853 for (dev = card->wandev.dev; dev;
4854 dev = *((struct net_device **)dev->priv)){
4855
4856 if ((chan=dev->priv) == NULL)
4857 continue;
4858
4859 if (!test_bit(0,&chan->unconfig_dlci))
4860 continue;
4861
4862 clear_bit(0,&chan->unconfig_dlci);
4863
4864 if (card->wandev.station == WANOPT_NODE){
4865 printk(KERN_INFO "%s: Unconfiguring DLCI %i\n",
4866 card->devname,chan->dlci);
4867 fr_delete_dlci(card,chan->dlci);
4868 }
4869 card->u.f.dlci_to_dev_map[chan->dlci] = NULL;
4870 }
4871}
4872
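/*==============================================================================
 * Build the frame relay encapsulation header for an outgoing packet in
 * chan->fr_header and return its length: Q.922 UI + NLPID for routed IP in
 * WANPIPE mode (-EINVAL for other protocols), a bridged-Ethernet SNAP header
 * in the BRIDGE modes, and 0 (no header) otherwise.
 */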
4873static int setup_fr_header(struct sk_buff **skb_orig, struct net_device* dev,
4874 char op_mode)
4875{
4876 struct sk_buff *skb = *skb_orig;
4877 fr_channel_t *chan=dev->priv;
4878
4879 if (op_mode == WANPIPE){
4880
4881 chan->fr_header[0]=Q922_UI;
4882
4883 switch (htons(skb->protocol)){
4884
4885 case ETH_P_IP:
4886 chan->fr_header[1]=NLPID_IP;
4887 break;
4888 default:
4889 return -EINVAL;
4890 }
4891
4892 return 2;
4893 }
4894
4895 /* If we are in bridging mode, we must apply
4896 * an Ethernet header */
4897 if (op_mode == BRIDGE || op_mode == BRIDGE_NODE){
4898
4899
4900 /* Encapsulate the packet as a bridged Ethernet frame. */
4901#ifdef DEBUG
4902 printk(KERN_INFO "%s: encapsulating skb for frame relay\n",
4903 dev->name);
4904#endif
4905
4906 chan->fr_header[0] = 0x03;
4907 chan->fr_header[1] = 0x00;
4908 chan->fr_header[2] = 0x80;
4909 chan->fr_header[3] = 0x00;
4910 chan->fr_header[4] = 0x80;
4911 chan->fr_header[5] = 0xC2;
4912 chan->fr_header[6] = 0x00;
4913 chan->fr_header[7] = 0x07;
4914
4915 /* Yuck. */
4916 skb->protocol = ETH_P_802_3;
4917 return 8;
4918
4919 }
4920
4921 return 0;
4922}
4923
4924
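/*==============================================================================
 * Verify that a DLCI is known to the adapter firmware. If FR_READ_CONFIG for
 * the DLCI succeeds nothing needs to be done (return 0). Otherwise disable
 * communications, rebuild the global configuration with the DLCI list of all
 * interfaces, reload it, re-initialize the Rx/Tx buffers, re-enable
 * communications and re-add/activate every DLCI (return 1, or 2 if any
 * command fails).
 */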
4925static int check_dlci_config (sdla_t *card, fr_channel_t *chan)
4926{
4927 fr_mbox_t* mbox = card->mbox;
4928 int err=0;
4929 fr_conf_t *conf=NULL;
4930 unsigned short dlci_num = chan->dlci;
4931 int dlci_offset=0;
4932 struct net_device *dev = NULL;
4933
4934 mbox->cmd.command = FR_READ_CONFIG;
4935 mbox->cmd.length = 0;
4936 mbox->cmd.dlci = dlci_num;
4937
4938 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
4939
4940 if (err == CMD_OK){
4941 return 0;
4942 }
4943
4944 for (dev = card->wandev.dev; dev;
4945 dev=*((struct net_device **)dev->priv))
4946 set_chan_state(dev,WAN_DISCONNECTED);
4947
4948 printk(KERN_INFO "DLCI %i Not configured, configuring\n",dlci_num);
4949
4950 mbox->cmd.command = FR_COMM_DISABLE;
4951 mbox->cmd.length = 0;
4952 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
4953 if (err != CMD_OK){
4954 fr_event(card, err, mbox);
4955 return 2;
4956 }
4957
4958 printk(KERN_INFO "Disabled Communications \n");
4959
4960 mbox->cmd.command = FR_READ_CONFIG;
4961 mbox->cmd.length = 0;
4962 mbox->cmd.dlci = 0;
4963
4964 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
4965
4966 if (err != CMD_OK){
4967 fr_event(card, err, mbox);
4968 return 2;
4969 }
4970
4971 conf = (fr_conf_t *)mbox->data;
4972
4973 dlci_offset=0;
4974 for (dev = card->wandev.dev; dev;
4975 dev = *((struct net_device **)dev->priv)) {
4976 fr_channel_t *chan_tmp = dev->priv;
4977 conf->dlci[dlci_offset] = chan_tmp->dlci;
4978 dlci_offset++;
4979 }
4980
4981 printk(KERN_INFO "Got Fr configuration Buffer Length is %x Dlci %i Dlci Off %i\n",
4982 mbox->cmd.length,
4983 mbox->cmd.length > 0x20 ? conf->dlci[0] : -1,
4984 dlci_offset );
4985
4986 mbox->cmd.length = 0x20 + dlci_offset*2;
4987
4988 mbox->cmd.command = FR_SET_CONFIG;
4989 mbox->cmd.dlci = 0;
4990
4991 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
4992
4993 if (err != CMD_OK){
4994 fr_event(card, err, mbox);
4995 return 2;
4996 }
4997
4998 initialize_rx_tx_buffers (card);
4999
5000
5001	printk(KERN_INFO "Configuration Succeeded for new DLCI %i\n",dlci_num);
5002
5003 if (fr_comm_enable (card)){
5004 return 2;
5005 }
5006
5007 printk(KERN_INFO "Enabling Communications \n");
5008
5009 for (dev = card->wandev.dev; dev;
5010 dev = *((struct net_device **)dev->priv)) {
5011 fr_channel_t *chan_tmp = dev->priv;
5012 fr_init_dlci(card,chan_tmp);
5013 fr_add_dlci(card, chan_tmp->dlci);
5014 fr_activate_dlci(card, chan_tmp->dlci);
5015 }
5016
5017	printk(KERN_INFO "END OF CONFIGURATION %i\n",dlci_num);
5018
5019 return 1;
5020}
5021
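/*==============================================================================
 * Re-read the receive buffer control block from adapter shared memory and
 * recompute the host pointers to the Rx buffer ring (S514 cards address the
 * dual-port memory flat; S508 cards subtract FR_MB_VECTOR to stay within the
 * memory window). Also records the Rx buffer base/top offsets and clears the
 * pending tx interrupt count.
 */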
5022static void initialize_rx_tx_buffers (sdla_t *card)
5023{
5024 fr_buf_info_t* buf_info;
5025
5026 if (card->hw.type == SDLA_S514) {
5027
5028 buf_info = (void*)(card->hw.dpmbase + FR_MB_VECTOR +
5029 FR508_RXBC_OFFS);
5030
5031 card->rxmb = (void*)(buf_info->rse_next + card->hw.dpmbase);
5032
5033 card->u.f.rxmb_base =
5034 (void*)(buf_info->rse_base + card->hw.dpmbase);
5035
5036 card->u.f.rxmb_last =
5037 (void*)(buf_info->rse_base +
5038 (buf_info->rse_num - 1) * sizeof(fr_rx_buf_ctl_t) +
5039 card->hw.dpmbase);
5040 }else{
5041 buf_info = (void*)(card->hw.dpmbase + FR508_RXBC_OFFS);
5042
5043 card->rxmb = (void*)(buf_info->rse_next -
5044 FR_MB_VECTOR + card->hw.dpmbase);
5045
5046 card->u.f.rxmb_base =
5047 (void*)(buf_info->rse_base -
5048 FR_MB_VECTOR + card->hw.dpmbase);
5049
5050 card->u.f.rxmb_last =
5051 (void*)(buf_info->rse_base +
5052 (buf_info->rse_num - 1) * sizeof(fr_rx_buf_ctl_t) -
5053 FR_MB_VECTOR + card->hw.dpmbase);
5054 }
5055
5056 card->u.f.rx_base = buf_info->buf_base;
5057 card->u.f.rx_top = buf_info->buf_top;
5058
5059 card->u.f.tx_interrupts_pending = 0;
5060
5061 return;
5062}
5063
5064
5065
5066MODULE_LICENSE("GPL");
5067
5068/****** End *****************************************************************/
diff --git a/drivers/net/wan/sdla_ft1.c b/drivers/net/wan/sdla_ft1.c
new file mode 100644
index 000000000000..5e3124856eb0
--- /dev/null
+++ b/drivers/net/wan/sdla_ft1.c
@@ -0,0 +1,344 @@
1/*****************************************************************************
2* sdla_ft1.c	WANPIPE(tm) Multiprotocol WAN Link Driver. FT1 configuration module.
3*
4* Authors: Nenad Corbic <ncorbic@sangoma.com>
5* Gideon Hack
6*
7* Copyright: (c) 1995-1999 Sangoma Technologies Inc.
8*
9* This program is free software; you can redistribute it and/or
10* modify it under the terms of the GNU General Public License
11* as published by the Free Software Foundation; either version
12* 2 of the License, or (at your option) any later version.
13* ============================================================================
14* Sep 30, 1999 Nenad Corbic Fixed dynamic IP and route setup.
15* Sep 23, 1999 Nenad Corbic Added SMP support, fixed tracing
16* Sep 13, 1999 Nenad Corbic Split up Port 0 and 1 into separate devices.
17* Jun 02, 1999 Gideon Hack Added support for the S514 adapter.
18* Oct 30, 1998 Jaspreet Singh Added Support for CHDLC API (HDLC STREAMING).
19* Oct 28, 1998 Jaspreet Singh Added Support for Dual Port CHDLC.
20* Aug 07, 1998 David Fong Initial version.
21*****************************************************************************/
22
23#include <linux/module.h>
24#include <linux/kernel.h> /* printk(), and other useful stuff */
25#include <linux/stddef.h> /* offsetof(), etc. */
26#include <linux/errno.h> /* return codes */
27#include <linux/string.h> /* inline memset(), etc. */
28#include <linux/slab.h> /* kmalloc(), kfree() */
29#include <linux/wanrouter.h> /* WAN router definitions */
30#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
31#include <linux/if_arp.h> /* ARPHRD_* defines */
32
33#include <linux/inetdevice.h>
34#include <asm/uaccess.h>
35
36#include <linux/in.h> /* sockaddr_in */
37#include <linux/inet.h>
38#include <linux/if.h>
39#include <asm/byteorder.h> /* htons(), etc. */
40#include <linux/sdlapci.h>
41#include <asm/io.h>
42
43#include <linux/sdla_chdlc.h> /* CHDLC firmware API definitions */
44
45/****** Defines & Macros ****************************************************/
46
47/* reasons for enabling the timer interrupt on the adapter */
48#define TMR_INT_ENABLED_UDP 0x0001
49#define TMR_INT_ENABLED_UPDATE 0x0002
50
51#define CHDLC_DFLT_DATA_LEN 1500 /* default MTU */
52#define CHDLC_HDR_LEN 1
53
54#define IFF_POINTTOPOINT 0x10
55
56#define WANPIPE 0x00
57#define API 0x01
58#define CHDLC_API 0x01
59
60#define PORT(x) (x == 0 ? "PRIMARY" : "SECONDARY" )
61
62
63/******Data Structures*****************************************************/
64
65/* This structure is placed in the private data area of the device structure.
66 * The card structure used to occupy the private area but now the following
67 * structure will incorporate the card structure along with CHDLC specific data
68 */
69
70typedef struct chdlc_private_area
71{
72 struct net_device *slave;
73 sdla_t *card;
74 int TracingEnabled; /* For enabling Tracing */
75 unsigned long curr_trace_addr; /* Used for Tracing */
76 unsigned long start_trace_addr;
77 unsigned long end_trace_addr;
78 unsigned long base_addr_trace_buffer;
79 unsigned long end_addr_trace_buffer;
80 unsigned short number_trace_elements;
81 unsigned available_buffer_space;
82 unsigned long router_start_time;
83 unsigned char route_status;
84 unsigned char route_removed;
85 unsigned long tick_counter; /* For 5s timeout counter */
86 unsigned long router_up_time;
87 u32 IP_address; /* IP addressing */
88 u32 IP_netmask;
89	unsigned char mc;			/* Multicast support on/off */
90 unsigned short udp_pkt_lgth; /* udp packet processing */
91 char udp_pkt_src;
92 char udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
93 unsigned short timer_int_enabled;
94 char update_comms_stats; /* updating comms stats */
95 //FIXME: add driver stats as per frame relay!
96
97} chdlc_private_area_t;
98
99/* Route Status options */
100#define NO_ROUTE 0x00
101#define ADD_ROUTE 0x01
102#define ROUTE_ADDED 0x02
103#define REMOVE_ROUTE 0x03
104
105
106/****** Function Prototypes *************************************************/
107/* WAN link driver entry points. These are called by the WAN router module. */
108static int wpft1_exec (struct sdla *card, void *u_cmd, void *u_data);
109static int chdlc_read_version (sdla_t* card, char* str);
110static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb);
111
112/****** Public Functions ****************************************************/
113
114/*============================================================================
115 * Cisco HDLC protocol initialization routine.
116 *
117 * This routine is called by the main WANPIPE module during setup. At this
118 * point adapter is completely initialized and firmware is running.
119 * o read firmware version (to make sure it's alive)
120 * o configure adapter
121 * o initialize protocol-specific fields of the adapter data space.
122 *
123 * Return: 0 o.k.
124 * < 0 failure.
125 */
126int wpft1_init (sdla_t* card, wandev_conf_t* conf)
127{
128 unsigned char port_num;
129 int err;
130
131 union
132 {
133 char str[80];
134 } u;
135 volatile CHDLC_MAILBOX_STRUCT* mb;
136 CHDLC_MAILBOX_STRUCT* mb1;
137 unsigned long timeout;
138
139 /* Verify configuration ID */
140 if (conf->config_id != WANCONFIG_CHDLC) {
141 printk(KERN_INFO "%s: invalid configuration ID %u!\n",
142 card->devname, conf->config_id);
143 return -EINVAL;
144 }
145
146 /* Use primary port */
147 card->u.c.comm_port = 0;
148
149
150 /* Initialize protocol-specific fields */
151 if(card->hw.type != SDLA_S514){
152 card->mbox = (void *) card->hw.dpmbase;
153 }else{
154 card->mbox = (void *) card->hw.dpmbase + PRI_BASE_ADDR_MB_STRUCT;
155 }
156
157 mb = mb1 = card->mbox;
158
159 if (!card->configured){
160
161 /* The board will place an 'I' in the return code to indicate that it is
162 ready to accept commands. We expect this to be completed in less
163 than 1 second. */
164
165 timeout = jiffies;
166 while (mb->return_code != 'I') /* Wait 1s for board to initialize */
167 if ((jiffies - timeout) > 1*HZ) break;
168
169 if (mb->return_code != 'I') {
170 printk(KERN_INFO
171 "%s: Initialization not completed by adapter\n",
172 card->devname);
173 printk(KERN_INFO "Please contact Sangoma representative.\n");
174 return -EIO;
175 }
176 }
177
178 /* Read firmware version. Note that when adapter initializes, it
179 * clears the mailbox, so it may appear that the first command was
180 * executed successfully when in fact it was merely erased. To work
181 * around this, we execute the first command twice.
182 */
183
184 if (chdlc_read_version(card, u.str))
185 return -EIO;
186
187 printk(KERN_INFO "%s: Running FT1 Configuration firmware v%s\n",
188 card->devname, u.str);
189
190 card->isr = NULL;
191 card->poll = NULL;
192 card->exec = &wpft1_exec;
193 card->wandev.update = NULL;
194 card->wandev.new_if = NULL;
195 card->wandev.del_if = NULL;
196 card->wandev.state = WAN_DUALPORT;
197 card->wandev.udp_port = conf->udp_port;
198
199 card->wandev.new_if_cnt = 0;
200
201 /* This is for the ports link state */
202 card->u.c.state = WAN_DISCONNECTED;
203
204 /* reset the number of times the 'update()' proc has been called */
205 card->u.c.update_call_count = 0;
206
207 card->wandev.ttl = 0x7F;
208 card->wandev.interface = 0;
209
210 card->wandev.clocking = 0;
211
212 port_num = card->u.c.comm_port;
213
214 /* Setup Port Bps */
215
216 card->wandev.bps = 0;
217
218 card->wandev.mtu = MIN_LGTH_CHDLC_DATA_CFG;
219
220 /* Set up the interrupt status area */
221 /* Read the CHDLC Configuration and obtain:
222	 *	Ptr to shared memory info struct
223 * Use this pointer to calculate the value of card->u.c.flags !
224 */
225 mb1->buffer_length = 0;
226 mb1->command = READ_CHDLC_CONFIGURATION;
227 err = sdla_exec(mb1) ? mb1->return_code : CMD_TIMEOUT;
228 if(err != COMMAND_OK) {
229 chdlc_error(card, err, mb1);
230 return -EIO;
231 }
232
233 if(card->hw.type == SDLA_S514){
234 card->u.c.flags = (void *)(card->hw.dpmbase +
235 (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
236 ptr_shared_mem_info_struct));
237 }else{
238 card->u.c.flags = (void *)(card->hw.dpmbase +
239 (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
240 ptr_shared_mem_info_struct % SDLA_WINDOWSIZE));
241 }
242
243 card->wandev.state = WAN_FT1_READY;
244 printk(KERN_INFO "%s: FT1 Config Ready !\n",card->devname);
245
246 return 0;
247}
248
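/*============================================================================
 * Execute an FT1 configuration command on behalf of user space: copy the
 * command (and any data) from the user buffers into the adapter mailbox,
 * run it with sdla_exec(), and copy the status and any returned data back.
 */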
249static int wpft1_exec(sdla_t *card, void *u_cmd, void *u_data)
250{
251 CHDLC_MAILBOX_STRUCT* mbox = card->mbox;
252 int len;
253
254 if (copy_from_user((void*)&mbox->command, u_cmd, sizeof(ft1_exec_cmd_t))){
255 return -EFAULT;
256 }
257
258 len = mbox->buffer_length;
259
260 if (len) {
261 if( copy_from_user((void*)&mbox->data, u_data, len)){
262 return -EFAULT;
263 }
264 }
265
266 /* execute command */
267 if (!sdla_exec(mbox)){
268 return -EIO;
269 }
270
271 /* return result */
272 if( copy_to_user(u_cmd, (void*)&mbox->command, sizeof(ft1_exec_cmd_t))){
273 return -EFAULT;
274 }
275
276 len = mbox->buffer_length;
277
278 if (len && u_data && copy_to_user(u_data, (void*)&mbox->data, len)){
279 return -EFAULT;
280 }
281
282 return 0;
283
284}
285
286/*============================================================================
287 * Read firmware code version.
288 * Put code version as ASCII string in str.
289 */
290static int chdlc_read_version (sdla_t* card, char* str)
291{
292 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
293 int len;
294 char err;
295 mb->buffer_length = 0;
296 mb->command = READ_CHDLC_CODE_VERSION;
297 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
298
299 if(err != COMMAND_OK) {
300 chdlc_error(card,err,mb);
301 }
302 else if (str) { /* is not null */
303 len = mb->buffer_length;
304 memcpy(str, mb->data, len);
305 str[len] = '\0';
306 }
307 return (err);
308}
309
310/*============================================================================
311 * Firmware error handler.
312 * This routine is called whenever firmware command returns non-zero
313 * return code.
314 *
315 * Return zero if previous command has to be cancelled.
316 */
317static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb)
318{
319 unsigned cmd = mb->command;
320
321 switch (err) {
322
323 case CMD_TIMEOUT:
324 printk(KERN_ERR "%s: command 0x%02X timed out!\n",
325 card->devname, cmd);
326 break;
327
328 case S514_BOTH_PORTS_SAME_CLK_MODE:
329 if(cmd == SET_CHDLC_CONFIGURATION) {
330 printk(KERN_INFO
331 "%s: Configure both ports for the same clock source\n",
332 card->devname);
333 break;
334 }
335
336 default:
337 printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n",
338 card->devname, cmd, err);
339 }
340
341 return 0;
342}
343
344MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/sdla_ppp.c b/drivers/net/wan/sdla_ppp.c
new file mode 100644
index 000000000000..1761cb68ab48
--- /dev/null
+++ b/drivers/net/wan/sdla_ppp.c
@@ -0,0 +1,3429 @@
1/*****************************************************************************
2* sdla_ppp.c WANPIPE(tm) Multiprotocol WAN Link Driver. PPP module.
3*
4* Author: Nenad Corbic <ncorbic@sangoma.com>
5*
6* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
7*
8* This program is free software; you can redistribute it and/or
9* modify it under the terms of the GNU General Public License
10* as published by the Free Software Foundation; either version
11* 2 of the License, or (at your option) any later version.
12* ============================================================================
13* Feb 28, 2001 Nenad Corbic o Updated if_tx_timeout() routine for
14* 2.4.X kernels.
15* Nov 29, 2000 Nenad Corbic o Added the 2.4.x kernel support:
16* get_ip_address() function has moved
17* into the ppp_poll() routine. It cannot
18* be called from an interrupt.
19* Nov 07, 2000 Nenad Corbic o Added security features for UDP debugging:
20* Deny all and specify allowed requests.
21* May 02, 2000 Nenad Corbic o Added the dynamic interface shutdown
22* option. When the link goes down, the
23* network interface IFF_UP flag is reset.
24* Mar 06, 2000 Nenad Corbic o Bug Fix: corrupted mbox recovery.
25* Feb 25, 2000 Nenad Corbic o Fixed the FT1 UDP debugger problem.
27* Feb 09, 2000  Nenad Corbic	o Shutdown bug fix. update() was called
27* with NULL dev pointer: no check.
28* Jan 24, 2000 Nenad Corbic o Disabled use of CMD complete inter.
30* Dec 15, 1999  Nenad Corbic	o Fixed up header files for 2.0.X kernels
30* Oct 25, 1999 Nenad Corbic o Support for 2.0.X kernels
31* Moved dynamic route processing into
32* a polling routine.
33* Oct 07, 1999 Nenad Corbic o Support for S514 PCI card.
34* Gideon Hack o UPD and Updates executed using timer interrupt
35* Sep 10, 1999 Nenad Corbic o Fixed up the /proc statistics
36* Jul 20, 1999 Nenad Corbic o Remove the polling routines and use
37* interrupts instead.
38* Sep 17, 1998 Jaspreet Singh o Updates for 2.2.X Kernels.
39* Aug 13, 1998 Jaspreet Singh o Improved Line Tracing.
40* Jun 22, 1998 David Fong o Added remote IP address assignment
41* Mar 15, 1998 Alan Cox o 2.1.8x basic port.
42* Apr 16, 1998 Jaspreet Singh o using htons() for the IPX protocol.
43* Dec 09, 1997 Jaspreet Singh o Added PAP and CHAP.
44* o Implemented new routines like
45* ppp_set_inbnd_auth(), ppp_set_outbnd_auth(),
46* tokenize() and strstrip().
47* Nov 27, 1997 Jaspreet Singh o Added protection against enabling of irqs
48* while they have been disabled.
49* Nov 24, 1997 Jaspreet Singh o Fixed another RACE condition caused by
50* disabling and enabling of irqs.
51* o Added new counters for stats on disable/enable
52* IRQs.
53* Nov 10, 1997 Jaspreet Singh o Initialized 'skb->mac.raw' to 'skb->data'
54* before every netif_rx().
55* o Free up the device structure in del_if().
56* Nov 07, 1997 Jaspreet Singh o Changed the delay to zero for Line tracing
57* command.
58* Oct 20, 1997 Jaspreet Singh o Added hooks in for Router UP time.
59* Oct 16, 1997 Jaspreet Singh o The critical flag is used to maintain flow
60* control by avoiding RACE conditions. The
61* cli() and restore_flags() are taken out.
62* A new structure, "ppp_private_area", is added
63* to provide Driver Statistics.
64* Jul 21, 1997 Jaspreet Singh o Protected calls to sdla_peek() by adding
65* save_flags(), cli() and restore_flags().
66* Jul 07, 1997 Jaspreet Singh o Added configurable TTL for UDP packets
67*				  o Added ability to discard multicast and
68*				    broadcast source addressed packets.
69* Jun 27, 1997 Jaspreet Singh o Added FT1 monitor capabilities
70* New case (0x25) statement in if_send routine.
71* Added a global variable rCount to keep track
72* of FT1 status enabled on the board.
73* May 22, 1997 Jaspreet Singh o Added change in the PPP_SET_CONFIG command for
74* 508 card to reflect changes in the new
75*				    ppp508.sfm for supporting: continuous transmission
76* of Configure-Request packets without receiving a
77* reply
78* OR-ed 0x300 to conf_flags
79* o Changed connect_tmout from 900 to 0
80* May 21, 1997 Jaspreet Singh o Fixed UDP Management for multiple boards
81* Apr 25, 1997 Farhan Thawar o added UDP Management stuff
82* Mar 11, 1997 Farhan Thawar Version 3.1.1
83* o fixed (+1) bug in rx_intr()
84* o changed if_send() to return 0 if
85* wandev.critical() is true
86* o free socket buffer in if_send() if
87* returning 0
88* Jan 15, 1997 Gene Kozin Version 3.1.0
89* o implemented exec() entry point
90* Jan 06, 1997 Gene Kozin Initial version.
91*****************************************************************************/
92
93#include <linux/module.h>
94#include <linux/kernel.h> /* printk(), and other useful stuff */
95#include <linux/stddef.h> /* offsetof(), etc. */
96#include <linux/errno.h> /* return codes */
97#include <linux/string.h> /* inline memset(), etc. */
98#include <linux/slab.h> /* kmalloc(), kfree() */
99#include <linux/wanrouter.h> /* WAN router definitions */
100#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
101#include <linux/if_arp.h> /* ARPHRD_* defines */
102#include <asm/byteorder.h> /* htons(), etc. */
103#include <linux/in.h> /* sockaddr_in */
104
105
106#include <asm/uaccess.h>
107#include <linux/inetdevice.h>
108#include <linux/netdevice.h>
109
110#include <linux/if.h>
111#include <linux/sdla_ppp.h> /* PPP firmware API definitions */
112#include <linux/sdlasfm.h> /* S514 Type Definition */
113/****** Defines & Macros ****************************************************/
114
115#define PPP_DFLT_MTU 1500 /* default MTU */
116#define PPP_MAX_MTU 4000 /* maximum MTU */
117#define PPP_HDR_LEN 1
118
119#define MAX_IP_ERRORS 100
120
121#define CONNECT_TIMEOUT (90*HZ) /* link connection timeout */
122#define HOLD_DOWN_TIME (5*HZ) /* link hold down time : Changed from 30 to 5 */
123
124/* For handle_IPXWAN() */
125#define CVHexToAscii(b) (((unsigned char)(b) > (unsigned char)9) ? ((unsigned char)'A' + ((unsigned char)(b) - (unsigned char)10)) : ((unsigned char)'0' + (unsigned char)(b)))
126
127/* Macro for enabling/disabling debugging comments */
128//#define NEX_DEBUG
129#ifdef NEX_DEBUG
130#define NEX_PRINTK(format, a...) printk(format, ## a)
131#else
132#define NEX_PRINTK(format, a...)
133#endif /* NEX_DEBUG */
134
135#define DCD(a) ( a & 0x08 ? "HIGH" : "LOW" )
136#define CTS(a) ( a & 0x20 ? "HIGH" : "LOW" )
137#define LCP(a) ( a == 0x09 ? "OPEN" : "CLOSED" )
138#define IP(a) ( a == 0x09 ? "ENABLED" : "DISABLED" )
139
140#define TMR_INT_ENABLED_UPDATE 0x01
141#define TMR_INT_ENABLED_PPP_EVENT 0x02
142#define TMR_INT_ENABLED_UDP 0x04
143#define TMR_INT_ENABLED_CONFIG 0x20
144
145/* Set Configuraton Command Definitions */
146#define PERCENT_TX_BUFF 60
147#define TIME_BETWEEN_CONF_REQ 30
148#define TIME_BETWEEN_PAP_CHAP_REQ 30
149#define WAIT_PAP_CHAP_WITHOUT_REPLY 300
150#define WAIT_AFTER_DCD_CTS_LOW 5
151#define TIME_DCD_CTS_LOW_AFTER_LNK_DOWN 10
152#define WAIT_DCD_HIGH_AFTER_ENABLE_COMM 900
153#define MAX_CONF_REQ_WITHOUT_REPLY 10
154#define MAX_TERM_REQ_WITHOUT_REPLY 2
155#define NUM_CONF_NAK_WITHOUT_REPLY 5
156#define NUM_AUTH_REQ_WITHOUT_REPLY 10
157
158#define END_OFFSET 0x1F0
159
160
161/******Data Structures*****************************************************/
162
163/* This structure is placed in the private data area of the device structure.
164 * The card structure used to occupy the private area but now the following
165 * structure will incorporate the card structure along with PPP specific data
166 */
167
168typedef struct ppp_private_area
169{
170 struct net_device *slave;
171 sdla_t* card;
172 unsigned long router_start_time; /*router start time in sec */
173 unsigned long tick_counter; /*used for 5 second counter*/
174 unsigned mc; /*multicast support on or off*/
175 unsigned char enable_IPX;
176 unsigned long network_number;
177 unsigned char pap;
178 unsigned char chap;
179 unsigned char sysname[31]; /* system name for in-bnd auth*/
180 unsigned char userid[511]; /* list of user ids */
181 unsigned char passwd[511]; /* list of passwords */
182 unsigned protocol; /* SKB Protocol */
183 u32 ip_local; /* Local IP Address */
184 u32 ip_remote; /* remote IP Address */
185
186 u32 ip_local_tmp;
187 u32 ip_remote_tmp;
188
189	unsigned char timer_int_enabled;	/* Who enabled the timer interrupt */
190 unsigned char update_comms_stats; /* Used by update function */
191 unsigned long curr_trace_addr; /* Trace information */
192 unsigned long start_trace_addr;
193 unsigned long end_trace_addr;
194
195	unsigned char interface_down;	/* Bring down interface when channel
196 goes down */
197 unsigned long config_wait_timeout; /* After if_open() if in dynamic if mode,
198 wait a few seconds before configuring */
199
200 unsigned short udp_pkt_lgth;
201 char udp_pkt_src;
202 char udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
203
204 /* PPP specific statistics */
205
206 if_send_stat_t if_send_stat;
207 rx_intr_stat_t rx_intr_stat;
208 pipe_mgmt_stat_t pipe_mgmt_stat;
209
210 unsigned long router_up_time;
211
212 /* Polling work queue entry. Each interface
213 * has its own work queue entry, which is used
214 * to defer events from the interrupt */
215 struct work_struct poll_work;
216 struct timer_list poll_delay_timer;
217
218 u8 gateway;
219 u8 config_ppp;
220 u8 ip_error;
221
222}ppp_private_area_t;
223
224/* variable for keeping track of enabling/disabling FT1 monitor status */
225static int rCount = 0;
226
227extern void disable_irq(unsigned int);
228extern void enable_irq(unsigned int);
229
230/****** Function Prototypes *************************************************/
231
232/* WAN link driver entry points. These are called by the WAN router module. */
233static int update(struct wan_device *wandev);
234static int new_if(struct wan_device *wandev, struct net_device *dev,
235 wanif_conf_t *conf);
236static int del_if(struct wan_device *wandev, struct net_device *dev);
237
238/* WANPIPE-specific entry points */
239static int wpp_exec (struct sdla *card, void *u_cmd, void *u_data);
240
241/* Network device interface */
242static int if_init(struct net_device *dev);
243static int if_open(struct net_device *dev);
244static int if_close(struct net_device *dev);
245static int if_header(struct sk_buff *skb, struct net_device *dev,
246 unsigned short type,
247 void *daddr, void *saddr, unsigned len);
248
249static void if_tx_timeout(struct net_device *dev);
250
251static int if_rebuild_hdr(struct sk_buff *skb);
252static struct net_device_stats *if_stats(struct net_device *dev);
253static int if_send(struct sk_buff *skb, struct net_device *dev);
254
255
256/* PPP firmware interface functions */
257static int ppp_read_version(sdla_t *card, char *str);
258static int ppp_set_outbnd_auth(sdla_t *card, ppp_private_area_t *ppp_priv_area);
259static int ppp_set_inbnd_auth(sdla_t *card, ppp_private_area_t *ppp_priv_area);
260static int ppp_configure(sdla_t *card, void *data);
261static int ppp_set_intr_mode(sdla_t *card, unsigned char mode);
262static int ppp_comm_enable(sdla_t *card);
263static int ppp_comm_disable(sdla_t *card);
264static int ppp_comm_disable_shutdown(sdla_t *card);
265static int ppp_get_err_stats(sdla_t *card);
266static int ppp_send(sdla_t *card, void *data, unsigned len, unsigned proto);
267static int ppp_error(sdla_t *card, int err, ppp_mbox_t *mb);
268
269static void wpp_isr(sdla_t *card);
270static void rx_intr(sdla_t *card);
271static void event_intr(sdla_t *card);
272static void timer_intr(sdla_t *card);
273
274/* Background polling routines */
275static void process_route(sdla_t *card);
276static void retrigger_comm(sdla_t *card);
277
278/* Miscellaneous functions */
279static int read_info( sdla_t *card );
280static int read_connection_info (sdla_t *card);
281static void remove_route( sdla_t *card );
282static int config508(struct net_device *dev, sdla_t *card);
283static void show_disc_cause(sdla_t * card, unsigned cause);
284static int reply_udp( unsigned char *data, unsigned int mbox_len );
285static void process_udp_mgmt_pkt(sdla_t *card, struct net_device *dev,
286 ppp_private_area_t *ppp_priv_area);
287static void init_ppp_tx_rx_buff( sdla_t *card );
288static int intr_test( sdla_t *card );
289static int udp_pkt_type( struct sk_buff *skb , sdla_t *card);
290static void init_ppp_priv_struct( ppp_private_area_t *ppp_priv_area);
291static void init_global_statistics( sdla_t *card );
292static int tokenize(char *str, char **tokens);
293static char* strstrip(char *str, char *s);
294static int chk_bcast_mcast_addr(sdla_t* card, struct net_device* dev,
295 struct sk_buff *skb);
296
297static int config_ppp (sdla_t *);
298static void ppp_poll(struct net_device *dev);
299static void trigger_ppp_poll(struct net_device *dev);
300static void ppp_poll_delay (unsigned long dev_ptr);
301
302
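/* Module-scope state shared between the ISR and the timer/poll paths:
 * Read_connection_info flags that PEER-mode IP information still has to
 * be (re)read, and Intr_test_counter counts test interrupts received
 * during intr_test(). */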
303static int Read_connection_info;
304static int Intr_test_counter;
305static unsigned short available_buffer_space;
306
307
308/* IPX functions */
309static void switch_net_numbers(unsigned char *sendpacket, unsigned long network_number,
310 unsigned char incoming);
311static int handle_IPXWAN(unsigned char *sendpacket, char *devname, unsigned char enable_IPX,
312 unsigned long network_number, unsigned short proto);
313
314/* Lock Functions */
315static void s508_lock (sdla_t *card, unsigned long *smp_flags);
316static void s508_unlock (sdla_t *card, unsigned long *smp_flags);
317
318static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
319 struct sk_buff *skb, struct net_device* dev,
320 ppp_private_area_t* ppp_priv_area );
321static unsigned short calc_checksum (char *data, int len);
322static void disable_comm (sdla_t *card);
323static int detect_and_fix_tx_bug (sdla_t *card);
324
325/****** Public Functions ****************************************************/
326
327/*============================================================================
328 * PPP protocol initialization routine.
329 *
330 * This routine is called by the main WANPIPE module during setup. At this
331 * point adapter is completely initialized and firmware is running.
332 * o read firmware version (to make sure it's alive)
333 * o configure adapter
334 * o initialize protocol-specific fields of the adapter data space.
335 *
336 * Return: 0 o.k.
337 * < 0 failure.
338 */
339int wpp_init(sdla_t *card, wandev_conf_t *conf)
340{
341 ppp_flags_t *flags;
342 union
343 {
344 char str[80];
345 } u;
346
347 /* Verify configuration ID */
348 if (conf->config_id != WANCONFIG_PPP) {
349
350 printk(KERN_INFO "%s: invalid configuration ID %u!\n",
351 card->devname, conf->config_id);
352 return -EINVAL;
353
354 }
355
356 /* Initialize miscellaneous pointers to structures on the adapter */
357 switch (card->hw.type) {
358
359 case SDLA_S508:
360 card->mbox =(void*)(card->hw.dpmbase + PPP508_MB_OFFS);
361 card->flags=(void*)(card->hw.dpmbase + PPP508_FLG_OFFS);
362 break;
363
364 case SDLA_S514:
365 card->mbox =(void*)(card->hw.dpmbase + PPP514_MB_OFFS);
366 card->flags=(void*)(card->hw.dpmbase + PPP514_FLG_OFFS);
367 break;
368
369 default:
370 return -EINVAL;
371
372 }
373 flags = card->flags;
374
375 /* Read firmware version. Note that when adapter initializes, it
376 * clears the mailbox, so it may appear that the first command was
377 * executed successfully when in fact it was merely erased. To work
378 * around this, we execute the first command twice.
379 */
380 if (ppp_read_version(card, NULL) || ppp_read_version(card, u.str))
381 return -EIO;
382
383 printk(KERN_INFO "%s: running PPP firmware v%s\n",card->devname, u.str);
384 /* Adjust configuration and set defaults */
385 card->wandev.mtu = (conf->mtu) ?
386 min_t(unsigned int, conf->mtu, PPP_MAX_MTU) : PPP_DFLT_MTU;
387
388 card->wandev.bps = conf->bps;
389 card->wandev.interface = conf->interface;
390 card->wandev.clocking = conf->clocking;
391 card->wandev.station = conf->station;
392 card->isr = &wpp_isr;
393 card->poll = NULL;
394 card->exec = &wpp_exec;
395 card->wandev.update = &update;
396 card->wandev.new_if = &new_if;
397 card->wandev.del_if = &del_if;
398 card->wandev.udp_port = conf->udp_port;
399 card->wandev.ttl = conf->ttl;
400 card->wandev.state = WAN_DISCONNECTED;
401 card->disable_comm = &disable_comm;
402 card->irq_dis_if_send_count = 0;
403 card->irq_dis_poll_count = 0;
404 card->u.p.authenticator = conf->u.ppp.authenticator;
405 card->u.p.ip_mode = conf->u.ppp.ip_mode ?
406 conf->u.ppp.ip_mode : WANOPT_PPP_STATIC;
407 card->TracingEnabled = 0;
408 Read_connection_info = 1;
409
410 /* initialize global statistics */
411 init_global_statistics( card );
412
413
414
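	/* Interrupt sanity test: wpp_isr() increments Intr_test_counter on
	 * every PPP_INTR_CMD interrupt, so if intr_test() fails or fewer than
	 * MAX_INTR_TEST_COUNTER interrupts arrive, the IRQ chosen for the
	 * adapter is not actually being delivered. */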
415 if (!card->configured){
416 int err;
417
418 Intr_test_counter = 0;
419 err = intr_test(card);
420
421 if(err || (Intr_test_counter < MAX_INTR_TEST_COUNTER)) {
422			printk(KERN_INFO "%s: Interrupt Test Failed, Counter: %i\n",
423				card->devname, Intr_test_counter);
424			printk(KERN_INFO "%s: Please choose another interrupt\n", card->devname);
425 return -EIO;
426 }
427
428 printk(KERN_INFO "%s: Interrupt Test Passed, Counter: %i\n",
429 card->devname, Intr_test_counter);
430 card->configured = 1;
431 }
432
433 ppp_set_intr_mode(card, PPP_INTR_TIMER);
434
435	/* Keep the timer interrupt masked until it is actually needed */
436 flags->imask &= ~PPP_INTR_TIMER;
437
438 printk(KERN_INFO "\n");
439
440 return 0;
441}
442
443/******* WAN Device Driver Entry Points *************************************/
444
445/*============================================================================
446 * Update device status & statistics.
447 */
448static int update(struct wan_device *wandev)
449{
450 sdla_t* card = wandev->private;
451 struct net_device* dev;
452 volatile ppp_private_area_t *ppp_priv_area;
453 ppp_flags_t *flags = card->flags;
454 unsigned long timeout;
455
456 /* sanity checks */
457 if ((wandev == NULL) || (wandev->private == NULL))
458 return -EFAULT;
459
460 if (wandev->state == WAN_UNCONFIGURED)
461 return -ENODEV;
462
463 /* Shutdown bug fix. This function can be
464 * called with NULL dev pointer during
465 * shutdown
466 */
467 if ((dev=card->wandev.dev) == NULL){
468 return -ENODEV;
469 }
470
471 if ((ppp_priv_area=dev->priv) == NULL){
472 return -ENODEV;
473 }
474
475 ppp_priv_area->update_comms_stats = 2;
476 ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_UPDATE;
477 flags->imask |= PPP_INTR_TIMER;
478
479 /* wait a maximum of 1 second for the statistics to be updated */
480 timeout = jiffies;
481 for(;;) {
482 if(ppp_priv_area->update_comms_stats == 0){
483 break;
484 }
485 if ((jiffies - timeout) > (1 * HZ)){
486 ppp_priv_area->update_comms_stats = 0;
487 ppp_priv_area->timer_int_enabled &=
488 ~TMR_INT_ENABLED_UPDATE;
489 return -EAGAIN;
490 }
491 }
492
493 return 0;
494}
495
496/*============================================================================
497 * Create new logical channel.
498 * This routine is called by the router when ROUTER_IFNEW IOCTL is being
499 * handled.
500 * o parse media- and hardware-specific configuration
501 * o make sure that a new channel can be created
502 * o allocate resources, if necessary
503 * o prepare network device structure for registration.
504 *
505 * Return: 0 o.k.
506 * < 0 failure (channel will not be created)
507 */
508static int new_if(struct wan_device *wandev, struct net_device *dev,
509 wanif_conf_t *conf)
510{
511 sdla_t *card = wandev->private;
512 ppp_private_area_t *ppp_priv_area;
513
514 if (wandev->ndev)
515 return -EEXIST;
516
517
518 printk(KERN_INFO "%s: Configuring Interface: %s\n",
519 card->devname, conf->name);
520
521 if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)) {
522
523 printk(KERN_INFO "%s: Invalid interface name!\n",
524 card->devname);
525 return -EINVAL;
526
527 }
528
529 /* allocate and initialize private data */
530 ppp_priv_area = kmalloc(sizeof(ppp_private_area_t), GFP_KERNEL);
531
532 if( ppp_priv_area == NULL )
533 return -ENOMEM;
534
535 memset(ppp_priv_area, 0, sizeof(ppp_private_area_t));
536
537 ppp_priv_area->card = card;
538
539 /* initialize data */
540 strcpy(card->u.p.if_name, conf->name);
541
542 /* initialize data in ppp_private_area structure */
543
544 init_ppp_priv_struct( ppp_priv_area );
545
546 ppp_priv_area->mc = conf->mc;
547 ppp_priv_area->pap = conf->pap;
548 ppp_priv_area->chap = conf->chap;
549
550 /* Option to bring down the interface when
551 * the link goes down */
552 if (conf->if_down){
553 set_bit(DYN_OPT_ON,&ppp_priv_area->interface_down);
554		printk(KERN_INFO "%s: Dynamic interface configuration enabled\n",
555 card->devname);
556 }
557
558 /* If no user ids are specified */
559 if(!strlen(conf->userid) && (ppp_priv_area->pap||ppp_priv_area->chap)){
560 kfree(ppp_priv_area);
561 return -EINVAL;
562 }
563
564 /* If no passwords are specified */
565 if(!strlen(conf->passwd) && (ppp_priv_area->pap||ppp_priv_area->chap)){
566 kfree(ppp_priv_area);
567 return -EINVAL;
568 }
569
570	if(strlen(conf->sysname) >= sizeof(ppp_priv_area->sysname)){
571 kfree(ppp_priv_area);
572 return -EINVAL;
573 }
574
575 /* If no system name is specified */
576 if(!strlen(conf->sysname) && (card->u.p.authenticator)){
577 kfree(ppp_priv_area);
578 return -EINVAL;
579 }
580
581 /* copy the data into the ppp private structure */
582 memcpy(ppp_priv_area->userid, conf->userid, strlen(conf->userid));
583 memcpy(ppp_priv_area->passwd, conf->passwd, strlen(conf->passwd));
584 memcpy(ppp_priv_area->sysname, conf->sysname, strlen(conf->sysname));
585
586
587 ppp_priv_area->enable_IPX = conf->enable_IPX;
588 if (conf->network_number){
589 ppp_priv_area->network_number = conf->network_number;
590 }else{
591 ppp_priv_area->network_number = 0xDEADBEEF;
592 }
593
594	/* Record whether or not this interface
595	 * acts as a gateway */
596 if ((ppp_priv_area->gateway = conf->gateway) == WANOPT_YES){
597 printk(KERN_INFO "%s: Interface %s is set as a gateway.\n",
598 card->devname,card->u.p.if_name);
599 }
600
601 /* prepare network device data space for registration */
602 strcpy(dev->name,card->u.p.if_name);
603
604 dev->init = &if_init;
605 dev->priv = ppp_priv_area;
606 dev->mtu = min_t(unsigned int, dev->mtu, card->wandev.mtu);
607
608 /* Initialize the polling work routine */
609 INIT_WORK(&ppp_priv_area->poll_work, (void*)(void*)ppp_poll, dev);
610
611 /* Initialize the polling delay timer */
612 init_timer(&ppp_priv_area->poll_delay_timer);
613 ppp_priv_area->poll_delay_timer.data = (unsigned long)dev;
614 ppp_priv_area->poll_delay_timer.function = ppp_poll_delay;
615
616
617 /* Since we start with dummy IP addresses we can say
618 * that route exists */
619 printk(KERN_INFO "\n");
620
621 return 0;
622}
623
624/*============================================================================
625 * Delete logical channel.
626 */
627static int del_if(struct wan_device *wandev, struct net_device *dev)
628{
629 return 0;
630}
631
632static void disable_comm (sdla_t *card)
633{
634 ppp_comm_disable_shutdown(card);
635 return;
636}
637
638/****** WANPIPE-specific entry points ***************************************/
639
640/*============================================================================
641 * Execute adapter interface command.
642 */
643
644//FIXME: Why do we need this ????
645static int wpp_exec(struct sdla *card, void *u_cmd, void *u_data)
646{
647 ppp_mbox_t *mbox = card->mbox;
648 int len;
649
650 if (copy_from_user((void*)&mbox->cmd, u_cmd, sizeof(ppp_cmd_t)))
651 return -EFAULT;
652
653 len = mbox->cmd.length;
654
655 if (len) {
656
657 if( copy_from_user((void*)&mbox->data, u_data, len))
658 return -EFAULT;
659
660 }
661
662 /* execute command */
663 if (!sdla_exec(mbox))
664 return -EIO;
665
666 /* return result */
667 if( copy_to_user(u_cmd, (void*)&mbox->cmd, sizeof(ppp_cmd_t)))
668 return -EFAULT;
669 len = mbox->cmd.length;
670
671 if (len && u_data && copy_to_user(u_data, (void*)&mbox->data, len))
672 return -EFAULT;
673
674 return 0;
675}
676
677/****** Network Device Interface ********************************************/
678
679/*============================================================================
680 * Initialize Linux network interface.
681 *
682 * This routine is called only once for each interface, during Linux network
683 * interface registration. Returning anything but zero will fail interface
684 * registration.
685 */
686static int if_init(struct net_device *dev)
687{
688 ppp_private_area_t *ppp_priv_area = dev->priv;
689 sdla_t *card = ppp_priv_area->card;
690 struct wan_device *wandev = &card->wandev;
691
692 /* Initialize device driver entry points */
693 dev->open = &if_open;
694 dev->stop = &if_close;
695 dev->hard_header = &if_header;
696 dev->rebuild_header = &if_rebuild_hdr;
697 dev->hard_start_xmit = &if_send;
698 dev->get_stats = &if_stats;
699 dev->tx_timeout = &if_tx_timeout;
700 dev->watchdog_timeo = TX_TIMEOUT;
701
702 /* Initialize media-specific parameters */
703 dev->type = ARPHRD_PPP; /* ARP h/w type */
704 dev->flags |= IFF_POINTOPOINT;
705 dev->flags |= IFF_NOARP;
706
707	/* Enable multicasting if specified by the user */
708 if (ppp_priv_area->mc == WANOPT_YES){
709 dev->flags |= IFF_MULTICAST;
710 }
711
712 dev->mtu = wandev->mtu;
713 dev->hard_header_len = PPP_HDR_LEN; /* media header length */
714
715 /* Initialize hardware parameters (just for reference) */
716 dev->irq = wandev->irq;
717 dev->dma = wandev->dma;
718 dev->base_addr = wandev->ioport;
719 dev->mem_start = wandev->maddr;
720 dev->mem_end = wandev->maddr + wandev->msize - 1;
721
722 /* Set transmit buffer queue length */
723 dev->tx_queue_len = 100;
724 SET_MODULE_OWNER(dev);
725
726 return 0;
727}
728
729/*============================================================================
730 * Open network interface.
731 * o enable communications and interrupts.
732 * o prevent module from unloading by incrementing use count
733 *
734 * Return 0 if O.k. or errno.
735 */
736static int if_open(struct net_device *dev)
737{
738 ppp_private_area_t *ppp_priv_area = dev->priv;
739 sdla_t *card = ppp_priv_area->card;
740 struct timeval tv;
741 //unsigned long smp_flags;
742
743 if (netif_running(dev))
744 return -EBUSY;
745
746 wanpipe_open(card);
747
748 netif_start_queue(dev);
749
750 do_gettimeofday( &tv );
751 ppp_priv_area->router_start_time = tv.tv_sec;
752
753	/* We cannot configure the card here because we don't
754	 * have access to the interface IP addresses.
755	 * Once the interface initialization is complete, we will
756	 * be able to access the IP addresses.  Therefore,
757	 * configure the ppp link in the poll routine */
758 set_bit(0,&ppp_priv_area->config_ppp);
759 ppp_priv_area->config_wait_timeout=jiffies;
760
761	/* Start the PPP configuration after a 1 second delay.
762	 * This will give the interface initialization time
763	 * to finish its configuration */
764 mod_timer(&ppp_priv_area->poll_delay_timer, jiffies + HZ);
765 return 0;
766}
767
768/*============================================================================
769 * Close network interface.
770 * o if this is the last open, then disable communications and interrupts.
771 * o reset flags.
772 */
773static int if_close(struct net_device *dev)
774{
775 ppp_private_area_t *ppp_priv_area = dev->priv;
776 sdla_t *card = ppp_priv_area->card;
777
778 netif_stop_queue(dev);
779 wanpipe_close(card);
780
781 del_timer (&ppp_priv_area->poll_delay_timer);
782 return 0;
783}
784
785/*============================================================================
786 * Build media header.
787 *
788 * The trick here is to put packet type (Ethertype) into 'protocol' field of
789 * the socket buffer, so that we don't forget it. If packet type is not
790 * supported, set skb->protocol to 0 and discard packet later.
791 *
792 * Return: media header length.
793 */
794static int if_header(struct sk_buff *skb, struct net_device *dev,
795 unsigned short type, void *daddr, void *saddr, unsigned len)
796{
797 switch (type)
798 {
799 case ETH_P_IP:
800 case ETH_P_IPX:
801 skb->protocol = htons(type);
802 break;
803
804 default:
805 skb->protocol = 0;
806 }
807
808 return PPP_HDR_LEN;
809}
810
811/*============================================================================
812 * Re-build media header.
813 *
814 * Return: 1 physical address resolved.
815 * 0 physical address not resolved
816 */
817static int if_rebuild_hdr (struct sk_buff *skb)
818{
819 struct net_device *dev = skb->dev;
820 ppp_private_area_t *ppp_priv_area = dev->priv;
821 sdla_t *card = ppp_priv_area->card;
822
823 printk(KERN_INFO "%s: rebuild_header() called for interface %s!\n",
824 card->devname, dev->name);
825 return 1;
826}
827
828/*============================================================================
829 * Handle transmit timeout event from netif watchdog
830 */
831static void if_tx_timeout(struct net_device *dev)
832{
833 ppp_private_area_t* chan = dev->priv;
834 sdla_t *card = chan->card;
835
836	/* If our device stays busy for at least 5 seconds then we will
837	 * kick start the device by waking its transmit queue.  We expect
838	 * that our device never stays busy for more than 5 seconds, so this
839	 * is only used as a last resort.
840	 */
841
842 ++ chan->if_send_stat.if_send_tbusy;
843 ++card->wandev.stats.collisions;
844
845 printk (KERN_INFO "%s: Transmit timed out on %s\n", card->devname,dev->name);
846 ++chan->if_send_stat.if_send_tbusy_timeout;
847 netif_wake_queue (dev);
848}
849
850
851
852/*============================================================================
853 * Send a packet on a network interface.
854 * o set tbusy flag (marks start of the transmission) to block a timer-based
855 * transmit from overlapping.
856 * o check link state. If link is not up, then drop the packet.
857 * o execute adapter send command.
858 * o free socket buffer
859 *
860 * Return: 0 complete (socket buffer must be freed)
861 * non-0 packet may be re-transmitted (tbusy must be set)
862 *
863 * Notes:
864 * 1. This routine is called either by the protocol stack or by the "net
865 * bottom half" (with interrupts enabled).
866 * 2. Setting tbusy flag will inhibit further transmit requests from the
867 * protocol stack and can be used for flow control with protocol layer.
868 */
869static int if_send (struct sk_buff *skb, struct net_device *dev)
870{
871 ppp_private_area_t *ppp_priv_area = dev->priv;
872 sdla_t *card = ppp_priv_area->card;
873 unsigned char *sendpacket;
874 unsigned long smp_flags;
875 ppp_flags_t *flags = card->flags;
876 int udp_type;
877 int err=0;
878
879 ++ppp_priv_area->if_send_stat.if_send_entry;
880
881 netif_stop_queue(dev);
882
883 if (skb == NULL) {
884
885		/* If we get here, some higher layer thinks we've missed a
886		 * tx-done interrupt.
887 */
888 printk(KERN_INFO "%s: interface %s got kicked!\n",
889 card->devname, dev->name);
890
891 ++ppp_priv_area->if_send_stat.if_send_skb_null;
892
893 netif_wake_queue(dev);
894 return 0;
895 }
896
897 sendpacket = skb->data;
898
899 udp_type = udp_pkt_type( skb, card );
900
901
902 if (udp_type == UDP_PTPIPE_TYPE){
903 if(store_udp_mgmt_pkt(UDP_PKT_FRM_STACK, card, skb, dev,
904 ppp_priv_area)){
905 flags->imask |= PPP_INTR_TIMER;
906 }
907 ++ppp_priv_area->if_send_stat.if_send_PIPE_request;
908 netif_start_queue(dev);
909 return 0;
910 }
911
912 /* Check for broadcast and multicast addresses
913 * If found, drop (deallocate) a packet and return.
914 */
915 if(chk_bcast_mcast_addr(card, dev, skb)){
916 ++card->wandev.stats.tx_dropped;
917 dev_kfree_skb_any(skb);
918 netif_start_queue(dev);
919 return 0;
920 }
921
922
923 if(card->hw.type != SDLA_S514){
924 s508_lock(card,&smp_flags);
925 }
926
927 if (test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
928
929 printk(KERN_INFO "%s: Critical in if_send: %lx\n",
930 card->wandev.name,card->wandev.critical);
931
932 ++card->wandev.stats.tx_dropped;
933 ++ppp_priv_area->if_send_stat.if_send_critical_non_ISR;
934 netif_start_queue(dev);
935 goto if_send_exit_crit;
936 }
937
938 if (card->wandev.state != WAN_CONNECTED) {
939
940 ++ppp_priv_area->if_send_stat.if_send_wan_disconnected;
941 ++card->wandev.stats.tx_dropped;
942 netif_start_queue(dev);
943
944 } else if (!skb->protocol) {
945 ++ppp_priv_area->if_send_stat.if_send_protocol_error;
946 ++card->wandev.stats.tx_errors;
947 netif_start_queue(dev);
948
949 } else {
950
951 /*If it's IPX change the network numbers to 0 if they're ours.*/
952 if( skb->protocol == htons(ETH_P_IPX) ) {
953 if(ppp_priv_area->enable_IPX) {
954 switch_net_numbers( skb->data,
955 ppp_priv_area->network_number, 0);
956 } else {
957 ++card->wandev.stats.tx_dropped;
958 netif_start_queue(dev);
959 goto if_send_exit_crit;
960 }
961 }
962
963 if (ppp_send(card, skb->data, skb->len, skb->protocol)) {
964 netif_stop_queue(dev);
965 ++ppp_priv_area->if_send_stat.if_send_adptr_bfrs_full;
966 ++ppp_priv_area->if_send_stat.if_send_tx_int_enabled;
967 } else {
968 ++ppp_priv_area->if_send_stat.if_send_bfr_passed_to_adptr;
969 ++card->wandev.stats.tx_packets;
970 card->wandev.stats.tx_bytes += skb->len;
971 netif_start_queue(dev);
972 dev->trans_start = jiffies;
973 }
974 }
975
976if_send_exit_crit:
977
978 if (!(err=netif_queue_stopped(dev))){
979 dev_kfree_skb_any(skb);
980 }else{
981 ppp_priv_area->tick_counter = jiffies;
982 flags->imask |= PPP_INTR_TXRDY; /* unmask Tx interrupts */
983 }
984
985 clear_bit(SEND_CRIT,&card->wandev.critical);
986 if(card->hw.type != SDLA_S514){
987 s508_unlock(card,&smp_flags);
988 }
989
990 return err;
991}
992
993
994/*=============================================================================
995 * Store a UDP management packet for later processing.
996 */
997
998static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
999 struct sk_buff *skb, struct net_device* dev,
1000 ppp_private_area_t* ppp_priv_area )
1001{
1002 int udp_pkt_stored = 0;
1003
1004 if(!ppp_priv_area->udp_pkt_lgth && (skb->len<=MAX_LGTH_UDP_MGNT_PKT)){
1005 ppp_priv_area->udp_pkt_lgth = skb->len;
1006 ppp_priv_area->udp_pkt_src = udp_pkt_src;
1007 memcpy(ppp_priv_area->udp_pkt_data, skb->data, skb->len);
1008 ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_UDP;
1009 ppp_priv_area->protocol = skb->protocol;
1010 udp_pkt_stored = 1;
1011 }else{
1012 if (skb->len > MAX_LGTH_UDP_MGNT_PKT){
1013 printk(KERN_INFO "%s: PIPEMON UDP request too long : %i\n",
1014 card->devname, skb->len);
1015 }else{
1016			printk(KERN_INFO "%s: PIPEMON UDP request already pending\n",
1017 card->devname);
1018 }
1019 ppp_priv_area->udp_pkt_lgth = 0;
1020 }
1021
1022	/* The skb is consumed here whether it arrived from the protocol
1023	 * stack (UDP_PKT_FRM_STACK) or from the network; either way we
1024	 * are done with it.
1025	 */
1026	dev_kfree_skb_any(skb);
1027
1028 return(udp_pkt_stored);
1029}
1030
1031
1032
1033/*============================================================================
1034 * Reply to UDP Management system.
1035 * Return length of reply.
1036 */
1037static int reply_udp( unsigned char *data, unsigned int mbox_len )
1038{
1039 unsigned short len, udp_length, temp, ip_length;
1040 unsigned long ip_temp;
1041 int even_bound = 0;
1042 ppp_udp_pkt_t *p_udp_pkt = (ppp_udp_pkt_t *)data;
1043
1044 /* Set length of packet */
1045 len = sizeof(ip_pkt_t)+
1046 sizeof(udp_pkt_t)+
1047 sizeof(wp_mgmt_t)+
1048 sizeof(cblock_t)+
1049 mbox_len;
1050
1051 /* fill in UDP reply */
1052 p_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
1053
1054 /* fill in UDP length */
1055 udp_length = sizeof(udp_pkt_t)+
1056 sizeof(wp_mgmt_t)+
1057 sizeof(cblock_t)+
1058 mbox_len;
1059
1060
1061 /* put it on an even boundary */
1062 if ( udp_length & 0x0001 ) {
1063 udp_length += 1;
1064 len += 1;
1065 even_bound=1;
1066 }
1067
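	/* The explicit byte swaps below put the 16-bit length fields into
	 * network byte order; this assumes a little-endian host. */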
1068 temp = (udp_length<<8)|(udp_length>>8);
1069 p_udp_pkt->udp_pkt.udp_length = temp;
1070
1071
1072 /* swap UDP ports */
1073 temp = p_udp_pkt->udp_pkt.udp_src_port;
1074 p_udp_pkt->udp_pkt.udp_src_port =
1075 p_udp_pkt->udp_pkt.udp_dst_port;
1076 p_udp_pkt->udp_pkt.udp_dst_port = temp;
1077
1078
1079 /* add UDP pseudo header */
1080 temp = 0x1100;
1081 *((unsigned short *)(p_udp_pkt->data+mbox_len+even_bound)) = temp;
1082 temp = (udp_length<<8)|(udp_length>>8);
1083 *((unsigned short *)(p_udp_pkt->data+mbox_len+even_bound+2)) = temp;
1084
1085 /* calculate UDP checksum */
1086 p_udp_pkt->udp_pkt.udp_checksum = 0;
1087 p_udp_pkt->udp_pkt.udp_checksum =
1088 calc_checksum(&data[UDP_OFFSET],udp_length+UDP_OFFSET);
1089
1090 /* fill in IP length */
1091 ip_length = udp_length + sizeof(ip_pkt_t);
1092 temp = (ip_length<<8)|(ip_length>>8);
1093 p_udp_pkt->ip_pkt.total_length = temp;
1094
1095 /* swap IP addresses */
1096 ip_temp = p_udp_pkt->ip_pkt.ip_src_address;
1097 p_udp_pkt->ip_pkt.ip_src_address = p_udp_pkt->ip_pkt.ip_dst_address;
1098 p_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
1099
1100 /* fill in IP checksum */
1101 p_udp_pkt->ip_pkt.hdr_checksum = 0;
1102 p_udp_pkt->ip_pkt.hdr_checksum = calc_checksum(data,sizeof(ip_pkt_t));
1103
1104 return len;
1105
1106} /* reply_udp */
1107
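/*============================================================================
 * Compute the 16-bit one's-complement (Internet) checksum over 'len' bytes
 * of data.  A result of zero is returned as 0xffff, as required for the
 * UDP checksum field.
 */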
1108static unsigned short calc_checksum (char *data, int len)
1109{
1110 unsigned short temp;
1111 unsigned long sum=0;
1112 int i;
1113
1114 for( i = 0; i <len; i+=2 ) {
1115 memcpy(&temp,&data[i],2);
1116 sum += (unsigned long)temp;
1117 }
1118
1119 while (sum >> 16 ) {
1120 sum = (sum & 0xffffUL) + (sum >> 16);
1121 }
1122
1123 temp = (unsigned short)sum;
1124 temp = ~temp;
1125
1126 if( temp == 0 )
1127 temp = 0xffff;
1128
1129 return temp;
1130}
1131
1132/*
1133   If incoming is 0 (outgoing) - if the net number is ours, make it 0.
1134   If incoming is 1 (incoming) - if the net number is 0, make it ours.
1135
1136*/
1137static void switch_net_numbers(unsigned char *sendpacket, unsigned long network_number, unsigned char incoming)
1138{
1139 unsigned long pnetwork_number;
1140
1141 pnetwork_number = (unsigned long)((sendpacket[6] << 24) +
1142 (sendpacket[7] << 16) + (sendpacket[8] << 8) +
1143 sendpacket[9]);
1144
1145 if (!incoming) {
1146 //If the destination network number is ours, make it 0
1147 if( pnetwork_number == network_number) {
1148 sendpacket[6] = sendpacket[7] = sendpacket[8] =
1149 sendpacket[9] = 0x00;
1150 }
1151 } else {
1152 //If the incoming network is 0, make it ours
1153 if( pnetwork_number == 0) {
1154 sendpacket[6] = (unsigned char)(network_number >> 24);
1155 sendpacket[7] = (unsigned char)((network_number &
1156 0x00FF0000) >> 16);
1157 sendpacket[8] = (unsigned char)((network_number &
1158 0x0000FF00) >> 8);
1159 sendpacket[9] = (unsigned char)(network_number &
1160 0x000000FF);
1161 }
1162 }
1163
1164
1165 pnetwork_number = (unsigned long)((sendpacket[18] << 24) +
1166 (sendpacket[19] << 16) + (sendpacket[20] << 8) +
1167 sendpacket[21]);
1168
1169 if( !incoming ) {
1170 //If the source network is ours, make it 0
1171 if( pnetwork_number == network_number) {
1172 sendpacket[18] = sendpacket[19] = sendpacket[20] =
1173 sendpacket[21] = 0x00;
1174 }
1175 } else {
1176 //If the source network is 0, make it ours
1177 if( pnetwork_number == 0 ) {
1178 sendpacket[18] = (unsigned char)(network_number >> 24);
1179 sendpacket[19] = (unsigned char)((network_number &
1180 0x00FF0000) >> 16);
1181 sendpacket[20] = (unsigned char)((network_number &
1182 0x0000FF00) >> 8);
1183 sendpacket[21] = (unsigned char)(network_number &
1184 0x000000FF);
1185 }
1186 }
1187} /* switch_net_numbers */
1188
1189/*============================================================================
1190 * Get ethernet-style interface statistics.
1191 * Return a pointer to struct net_device_stats.
1192 */
1193static struct net_device_stats *if_stats(struct net_device *dev)
1194{
1195
1196 ppp_private_area_t *ppp_priv_area = dev->priv;
1197 sdla_t* card;
1198
1199 if( ppp_priv_area == NULL )
1200 return NULL;
1201
1202 card = ppp_priv_area->card;
1203 return &card->wandev.stats;
1204}
1205
1206/****** PPP Firmware Interface Functions ************************************/
1207
1208/*============================================================================
1209 * Read firmware code version.
1210 * Put code version as ASCII string in str.
1211 */
1212static int ppp_read_version(sdla_t *card, char *str)
1213{
1214 ppp_mbox_t *mb = card->mbox;
1215 int err;
1216
1217 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1218 mb->cmd.command = PPP_READ_CODE_VERSION;
1219 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1220
1221 if (err != CMD_OK)
1222
1223 ppp_error(card, err, mb);
1224
1225 else if (str) {
1226
1227 int len = mb->cmd.length;
1228
1229 memcpy(str, mb->data, len);
1230 str[len] = '\0';
1231
1232 }
1233
1234 return err;
1235}
1236/*===========================================================================
1237 * Set Out-Bound Authentication.
1238*/
1239static int ppp_set_outbnd_auth (sdla_t *card, ppp_private_area_t *ppp_priv_area)
1240{
1241 ppp_mbox_t *mb = card->mbox;
1242 int err;
1243
1244 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1245 memset(&mb->data, 0, (strlen(ppp_priv_area->userid) +
1246 strlen(ppp_priv_area->passwd) + 2 ) );
1247 memcpy(mb->data, ppp_priv_area->userid, strlen(ppp_priv_area->userid));
1248 memcpy((mb->data + strlen(ppp_priv_area->userid) + 1),
1249 ppp_priv_area->passwd, strlen(ppp_priv_area->passwd));
1250
1251 mb->cmd.length = strlen(ppp_priv_area->userid) +
1252 strlen(ppp_priv_area->passwd) + 2 ;
1253
1254 mb->cmd.command = PPP_SET_OUTBOUND_AUTH;
1255
1256 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1257
1258 if (err != CMD_OK)
1259 ppp_error(card, err, mb);
1260
1261 return err;
1262}
1263
1264/*===========================================================================
1265 * Set In-Bound Authentication.
1266*/
1267static int ppp_set_inbnd_auth (sdla_t *card, ppp_private_area_t *ppp_priv_area)
1268{
1269 ppp_mbox_t *mb = card->mbox;
1270 int err, i;
1271 char* user_tokens[32];
1272 char* pass_tokens[32];
1273 int userids, passwds;
1274 int add_ptr;
1275
1276 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1277 memset(&mb->data, 0, 1008);
1278 memcpy(mb->data, ppp_priv_area->sysname,
1279 strlen(ppp_priv_area->sysname));
1280
1281	/* Parse the userid string and the password string and build a string
1282	   to copy to the data area of the command structure. The string
1283 will look like "SYS_NAME<NULL>USER1<NULL>PASS1<NULL>USER2<NULL>PASS2
1284 ....<NULL> "
1285 */
1286 userids = tokenize( ppp_priv_area->userid, user_tokens);
1287 passwds = tokenize( ppp_priv_area->passwd, pass_tokens);
1288
1289 if (userids != passwds){
1290 printk(KERN_INFO "%s: Number of passwords does not equal the number of user ids\n", card->devname);
1291 return 1;
1292 }
1293
1294 add_ptr = strlen(ppp_priv_area->sysname) + 1;
1295 for (i=0; i<userids; i++){
1296 memcpy((mb->data + add_ptr), user_tokens[i],
1297 strlen(user_tokens[i]));
1298 memcpy((mb->data + add_ptr + strlen(user_tokens[i]) + 1),
1299 pass_tokens[i], strlen(pass_tokens[i]));
1300 add_ptr = add_ptr + strlen(user_tokens[i]) + 1 +
1301 strlen(pass_tokens[i]) + 1;
1302 }
1303
1304 mb->cmd.length = add_ptr + 1;
1305 mb->cmd.command = PPP_SET_INBOUND_AUTH;
1306
1307 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1308
1309 if (err != CMD_OK)
1310 ppp_error(card, err, mb);
1311
1312 return err;
1313}
1314
1315
1316/*============================================================================
1317 * Tokenize string.
1318 * Parse a string of the following syntax:
1319 *		<arg1>/<arg2>/...
1320 * and fill array of tokens with pointers to string elements.
1321 *
1322 */
1323static int tokenize (char *str, char **tokens)
1324{
1325 int cnt = 0;
1326
1327 tokens[0] = strsep(&str, "/");
1328 while (tokens[cnt] && (cnt < 32 - 1))
1329 {
1330 tokens[cnt] = strstrip(tokens[cnt], " \t");
1331 tokens[++cnt] = strsep(&str, "/");
1332 }
1333 return cnt;
1334}
1335
1336/*============================================================================
1337 * Strip leading and trailing characters in the set s off the string str.
1338 */
1339static char* strstrip (char *str, char* s)
1340{
1341 char *eos = str + strlen(str); /* -> end of string */
1342
1343	while (*str && strchr(s, *str))
1344		++str;			/* strip leading characters */
1345
1346	while ((eos > str) && strchr(s, *(eos - 1)))
1347		--eos;			/* strip trailing characters */
1348
1349 *eos = '\0';
1350 return str;
1351}
1352/*============================================================================
1353 * Configure PPP firmware.
1354 */
1355static int ppp_configure(sdla_t *card, void *data)
1356{
1357 ppp_mbox_t *mb = card->mbox;
1358 int data_len = sizeof(ppp508_conf_t);
1359 int err;
1360
1361 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1362 memcpy(mb->data, data, data_len);
1363 mb->cmd.length = data_len;
1364 mb->cmd.command = PPP_SET_CONFIG;
1365 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1366
1367 if (err != CMD_OK)
1368 ppp_error(card, err, mb);
1369
1370 return err;
1371}
1372
1373/*============================================================================
1374 * Set interrupt mode.
1375 */
1376static int ppp_set_intr_mode(sdla_t *card, unsigned char mode)
1377{
1378 ppp_mbox_t *mb = card->mbox;
1379 ppp_intr_info_t *ppp_intr_data = (ppp_intr_info_t *) &mb->data[0];
1380 int err;
1381
1382 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1383 ppp_intr_data->i_enable = mode;
1384
1385 ppp_intr_data->irq = card->hw.irq;
1386 mb->cmd.length = 2;
1387
1388 /* If timer has been enabled, set the timer delay to 1sec */
1389 if (mode & 0x80){
1390 ppp_intr_data->timer_len = 250; //5;//100; //250;
1391 mb->cmd.length = 4;
1392 }
1393
1394 mb->cmd.command = PPP_SET_INTR_FLAGS;
1395 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1396
1397 if (err != CMD_OK)
1398 ppp_error(card, err, mb);
1399
1400
1401 return err;
1402}
1403
1404/*============================================================================
1405 * Enable communications.
1406 */
1407static int ppp_comm_enable(sdla_t *card)
1408{
1409 ppp_mbox_t *mb = card->mbox;
1410 int err;
1411
1412 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1413 mb->cmd.command = PPP_COMM_ENABLE;
1414 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1415
1416 if (err != CMD_OK)
1417 ppp_error(card, err, mb);
1418 else
1419 card->u.p.comm_enabled = 1;
1420
1421 return err;
1422}
1423
1424/*============================================================================
1425 * Disable communications.
1426 */
1427static int ppp_comm_disable(sdla_t *card)
1428{
1429 ppp_mbox_t *mb = card->mbox;
1430 int err;
1431
1432 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1433 mb->cmd.command = PPP_COMM_DISABLE;
1434 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1435 if (err != CMD_OK)
1436 ppp_error(card, err, mb);
1437 else
1438 card->u.p.comm_enabled = 0;
1439
1440 return err;
1441}
1442
1443static int ppp_comm_disable_shutdown(sdla_t *card)
1444{
1445 ppp_mbox_t *mb = card->mbox;
1446 ppp_intr_info_t *ppp_intr_data;
1447 int err;
1448
1449 if (!mb){
1450 return 1;
1451 }
1452
1453 ppp_intr_data = (ppp_intr_info_t *) &mb->data[0];
1454
1455 /* Disable all interrupts */
1456 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1457 ppp_intr_data->i_enable = 0;
1458
1459 ppp_intr_data->irq = card->hw.irq;
1460 mb->cmd.length = 2;
1461
1462 mb->cmd.command = PPP_SET_INTR_FLAGS;
1463 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1464
1465	/* Disable communications */
1466 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1467 mb->cmd.command = PPP_COMM_DISABLE;
1468 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1469
1470 card->u.p.comm_enabled = 0;
1471
1472 return 0;
1473}
1474
1475
1476
1477/*============================================================================
1478 * Get communications error statistics.
1479 */
1480static int ppp_get_err_stats(sdla_t *card)
1481{
1482 ppp_mbox_t *mb = card->mbox;
1483 int err;
1484
1485 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
1486 mb->cmd.command = PPP_READ_ERROR_STATS;
1487 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
1488
1489 if (err == CMD_OK) {
1490
1491 ppp_err_stats_t* stats = (void*)mb->data;
1492 card->wandev.stats.rx_over_errors = stats->rx_overrun;
1493 card->wandev.stats.rx_crc_errors = stats->rx_bad_crc;
1494 card->wandev.stats.rx_missed_errors = stats->rx_abort;
1495 card->wandev.stats.rx_length_errors = stats->rx_lost;
1496 card->wandev.stats.tx_aborted_errors = stats->tx_abort;
1497
1498 } else
1499 ppp_error(card, err, mb);
1500
1501 return err;
1502}
1503
1504/*============================================================================
1505 * Send packet.
1506 * Return: 0 - o.k.
1507 * 1 - no transmit buffers available
1508 */
1509static int ppp_send (sdla_t *card, void *data, unsigned len, unsigned proto)
1510{
1511 ppp_buf_ctl_t *txbuf = card->u.p.txbuf;
1512
1513 if (txbuf->flag)
1514 return 1;
1515
1516 sdla_poke(&card->hw, txbuf->buf.ptr, data, len);
1517
1518 txbuf->length = len; /* frame length */
1519
1520 if (proto == htons(ETH_P_IPX))
1521 txbuf->proto = 0x01; /* protocol ID */
1522 else
1523 txbuf->proto = 0x00; /* protocol ID */
1524
1525 txbuf->flag = 1; /* start transmission */
1526
1527 /* Update transmit buffer control fields */
1528 card->u.p.txbuf = ++txbuf;
1529
1530 if ((void*)txbuf > card->u.p.txbuf_last)
1531 card->u.p.txbuf = card->u.p.txbuf_base;
1532
1533 return 0;
1534}
1535
1536/****** Firmware Error Handler **********************************************/
1537
1538/*============================================================================
1539 * Firmware error handler.
1540 * This routine is called whenever firmware command returns non-zero
1541 * return code.
1542 *
1543 * Return zero if previous command has to be cancelled.
1544 */
1545static int ppp_error(sdla_t *card, int err, ppp_mbox_t *mb)
1546{
1547 unsigned cmd = mb->cmd.command;
1548
1549 switch (err) {
1550
1551 case CMD_TIMEOUT:
1552 printk(KERN_ERR "%s: command 0x%02X timed out!\n",
1553 card->devname, cmd);
1554 break;
1555
1556 default:
1557 printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n"
1558 , card->devname, cmd, err);
1559 }
1560
1561 return 0;
1562}
1563
1564/****** Interrupt Handlers **************************************************/
1565
1566/*============================================================================
1567 * PPP interrupt service routine.
1568 */
1569static void wpp_isr (sdla_t *card)
1570{
1571 ppp_flags_t *flags = card->flags;
1572 char *ptr = &flags->iflag;
1573 struct net_device *dev = card->wandev.dev;
1574 int i;
1575
1576 card->in_isr = 1;
1577 ++card->statistics.isr_entry;
1578
1579 if (!dev && flags->iflag != PPP_INTR_CMD){
1580 card->in_isr = 0;
1581 flags->iflag = 0;
1582 return;
1583 }
1584
1585 if (test_bit(PERI_CRIT, (void*)&card->wandev.critical)) {
1586 card->in_isr = 0;
1587 flags->iflag = 0;
1588 return;
1589 }
1590
1591
1592 if(card->hw.type != SDLA_S514){
1593 if (test_bit(SEND_CRIT, (void*)&card->wandev.critical)) {
1594 ++card->statistics.isr_already_critical;
1595 printk (KERN_INFO "%s: Critical while in ISR!\n",
1596 card->devname);
1597 card->in_isr = 0;
1598 flags->iflag = 0;
1599 return;
1600 }
1601 }
1602
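	/* flags->iflag is expected to hold a single interrupt cause set by
	 * the firmware; it is cleared again on exit from this ISR. */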
1603 switch (flags->iflag) {
1604
1605 case PPP_INTR_RXRDY: /* receive interrupt 0x01 (bit 0)*/
1606 ++card->statistics.isr_rx;
1607 rx_intr(card);
1608 break;
1609
1610 case PPP_INTR_TXRDY: /* transmit interrupt 0x02 (bit 1)*/
1611 ++card->statistics.isr_tx;
1612 flags->imask &= ~PPP_INTR_TXRDY;
1613 netif_wake_queue(dev);
1614 break;
1615
1616 case PPP_INTR_CMD: /* interface command completed */
1617 ++Intr_test_counter;
1618 ++card->statistics.isr_intr_test;
1619 break;
1620
1621 case PPP_INTR_MODEM: /* modem status change (DCD, CTS) 0x04 (bit 2)*/
1622 case PPP_INTR_DISC: /* Data link disconnected 0x10 (bit 4)*/
1623 case PPP_INTR_OPEN: /* Data link open 0x20 (bit 5)*/
1624 case PPP_INTR_DROP_DTR: /* DTR drop timeout expired 0x40 bit 6 */
1625 event_intr(card);
1626 break;
1627
1628 case PPP_INTR_TIMER:
1629 timer_intr(card);
1630 break;
1631
1632 default: /* unexpected interrupt */
1633 ++card->statistics.isr_spurious;
1634 printk(KERN_INFO "%s: spurious interrupt 0x%02X!\n",
1635 card->devname, flags->iflag);
1636 printk(KERN_INFO "%s: ID Bytes = ",card->devname);
1637 for(i = 0; i < 8; i ++)
1638 printk(KERN_INFO "0x%02X ", *(ptr + 0x28 + i));
1639 printk(KERN_INFO "\n");
1640 }
1641
1642 card->in_isr = 0;
1643 flags->iflag = 0;
1644 return;
1645}
1646
1647/*============================================================================
1648 * Receive interrupt handler.
1649 */
1650static void rx_intr(sdla_t *card)
1651{
1652 ppp_buf_ctl_t *rxbuf = card->rxmb;
1653 struct net_device *dev = card->wandev.dev;
1654 ppp_private_area_t *ppp_priv_area;
1655 struct sk_buff *skb;
1656 unsigned len;
1657 void *buf;
1658 int i;
1659 ppp_flags_t *flags = card->flags;
1660 char *ptr = &flags->iflag;
1661 int udp_type;
1662
1663
1664 if (rxbuf->flag != 0x01) {
1665
1666 printk(KERN_INFO
1667 "%s: corrupted Rx buffer @ 0x%X, flag = 0x%02X!\n",
1668 card->devname, (unsigned)rxbuf, rxbuf->flag);
1669
1670 printk(KERN_INFO "%s: ID Bytes = ",card->devname);
1671
1672 for(i = 0; i < 8; i ++)
1673 printk(KERN_INFO "0x%02X ", *(ptr + 0x28 + i));
1674 printk(KERN_INFO "\n");
1675
1676 ++card->statistics.rx_intr_corrupt_rx_bfr;
1677
1678
1679 /* Bug Fix: Mar 6 2000
1680		 * If we get a corrupted mailbox, it means that the driver
1681 * is out of sync with the firmware. There is no recovery.
1682 * If we don't turn off all interrupts for this card
1683 * the machine will crash.
1684 */
1685 printk(KERN_INFO "%s: Critical router failure ...!!!\n", card->devname);
1686 printk(KERN_INFO "Please contact Sangoma Technologies !\n");
1687 ppp_set_intr_mode(card,0);
1688 return;
1689 }
1690
1691 if (dev && netif_running(dev) && dev->priv){
1692
1693 len = rxbuf->length;
1694 ppp_priv_area = dev->priv;
1695
1696 /* Allocate socket buffer */
1697 skb = dev_alloc_skb(len);
1698
1699 if (skb != NULL) {
1700
1701 /* Copy data to the socket buffer */
1702 unsigned addr = rxbuf->buf.ptr;
1703
1704 if ((addr + len) > card->u.p.rx_top + 1) {
1705
1706 unsigned tmp = card->u.p.rx_top - addr + 1;
1707 buf = skb_put(skb, tmp);
1708 sdla_peek(&card->hw, addr, buf, tmp);
1709 addr = card->u.p.rx_base;
1710 len -= tmp;
1711 }
1712 buf = skb_put(skb, len);
1713 sdla_peek(&card->hw, addr, buf, len);
1714
1715 /* Decapsulate packet */
1716 switch (rxbuf->proto) {
1717
1718 case 0x00:
1719 skb->protocol = htons(ETH_P_IP);
1720 break;
1721
1722 case 0x01:
1723 skb->protocol = htons(ETH_P_IPX);
1724 break;
1725 }
1726
1727 udp_type = udp_pkt_type( skb, card );
1728
1729 if (udp_type == UDP_PTPIPE_TYPE){
1730
1731 /* Handle a UDP Request in Timer Interrupt */
1732 if(store_udp_mgmt_pkt(UDP_PKT_FRM_NETWORK, card, skb, dev,
1733 ppp_priv_area)){
1734 flags->imask |= PPP_INTR_TIMER;
1735 }
1736 ++ppp_priv_area->rx_intr_stat.rx_intr_PIPE_request;
1737
1738
1739 } else if (handle_IPXWAN(skb->data,card->devname,
1740 ppp_priv_area->enable_IPX,
1741 ppp_priv_area->network_number,
1742 skb->protocol)) {
1743
1744 /* Handle an IPXWAN packet */
1745 if( ppp_priv_area->enable_IPX) {
1746
1747 /* Make sure we are not already sending */
1748 if (!test_bit(SEND_CRIT, &card->wandev.critical)){
1749 ppp_send(card, skb->data, skb->len, htons(ETH_P_IPX));
1750 }
1751 dev_kfree_skb_any(skb);
1752
1753 } else {
1754 ++card->wandev.stats.rx_dropped;
1755 }
1756 } else {
1757 /* Pass data up the protocol stack */
1758 skb->dev = dev;
1759 skb->mac.raw = skb->data;
1760
1761 ++card->wandev.stats.rx_packets;
1762 card->wandev.stats.rx_bytes += skb->len;
1763 ++ppp_priv_area->rx_intr_stat.rx_intr_bfr_passed_to_stack;
1764 netif_rx(skb);
1765 dev->last_rx = jiffies;
1766 }
1767
1768 } else {
1769
1770 if (net_ratelimit()){
1771 printk(KERN_INFO "%s: no socket buffers available!\n",
1772 card->devname);
1773 }
1774 ++card->wandev.stats.rx_dropped;
1775 ++ppp_priv_area->rx_intr_stat.rx_intr_no_socket;
1776 }
1777
1778 } else {
1779 ++card->statistics.rx_intr_dev_not_started;
1780 }
1781
1782 /* Release buffer element and calculate a pointer to the next one */
1783 rxbuf->flag = 0x00;
1784 card->rxmb = ++rxbuf;
1785 if ((void*)rxbuf > card->u.p.rxbuf_last)
1786 card->rxmb = card->u.p.rxbuf_base;
1787}
1788
1789
1790static void event_intr (sdla_t *card)
1791{
1792
1793 struct net_device* dev = card->wandev.dev;
1794 ppp_private_area_t* ppp_priv_area = dev->priv;
1795 volatile ppp_flags_t *flags = card->flags;
1796
1797 switch (flags->iflag){
1798
1799 case PPP_INTR_MODEM: /* modem status change (DCD, CTS) 0x04 (bit 2)*/
1800
1801 if (net_ratelimit()){
1802 printk (KERN_INFO "%s: Modem status: DCD=%s CTS=%s\n",
1803 card->devname, DCD(flags->mstatus), CTS(flags->mstatus));
1804 }
1805 break;
1806
1807 case PPP_INTR_DISC: /* Data link disconnected 0x10 (bit 4)*/
1808
1809 NEX_PRINTK (KERN_INFO "Data link disconnected intr Cause %X\n",
1810 flags->disc_cause);
1811
1812 if (flags->disc_cause &
1813 (PPP_LOCAL_TERMINATION | PPP_DCD_CTS_DROP |
1814 PPP_REMOTE_TERMINATION)) {
1815
1816 if (card->u.p.ip_mode == WANOPT_PPP_PEER) {
1817 set_bit(0,&Read_connection_info);
1818 }
1819 wanpipe_set_state(card, WAN_DISCONNECTED);
1820
1821 show_disc_cause(card, flags->disc_cause);
1822 ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_PPP_EVENT;
1823 flags->imask |= PPP_INTR_TIMER;
1824 trigger_ppp_poll(dev);
1825 }
1826 break;
1827
1828 case PPP_INTR_OPEN: /* Data link open 0x20 (bit 5)*/
1829
1830 NEX_PRINTK (KERN_INFO "%s: PPP Link Open, LCP=%s IP=%s\n",
1831 card->devname,LCP(flags->lcp_state),
1832 IP(flags->ip_state));
1833
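		/* 0x09 is the firmware's "opened" state for LCP and for the
		 * IP/IPX NCPs (see the LCP()/IP() macros above). */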
1834 if (flags->lcp_state == 0x09 &&
1835 (flags->ip_state == 0x09 || flags->ipx_state == 0x09)){
1836
1837 /* Initialize the polling timer and set the state
1838 * to WAN_CONNNECTED */
1839
1840
1841 /* BUG FIX: When the protocol restarts, during heavy
1842 * traffic, board tx buffers and driver tx buffers
1843 * can go out of sync. This checks the condition
1844 * and if the tx buffers are out of sync, the
1845 * protocols are restarted.
1846			 * I don't know why the board tx buffer goes out
1847			 * of sync.  It could be that a packet is transmitted
1848			 * while the link is down, but that should not be
1849			 * possible.  The other possibility is that the
1850			 * firmware doesn't reinitialize properly.
1851 * FIXME: A better fix should be found.
1852 */
1853 if (detect_and_fix_tx_bug(card)){
1854
1855 ppp_comm_disable(card);
1856
1857 wanpipe_set_state(card, WAN_DISCONNECTED);
1858
1859 ppp_priv_area->timer_int_enabled |=
1860 TMR_INT_ENABLED_PPP_EVENT;
1861 flags->imask |= PPP_INTR_TIMER;
1862 break;
1863 }
1864
1865 card->state_tick = jiffies;
1866 wanpipe_set_state(card, WAN_CONNECTED);
1867
1868 NEX_PRINTK(KERN_INFO "CON: L Tx: %lx B Tx: %lx || L Rx %lx B Rx %lx\n",
1869 (unsigned long)card->u.p.txbuf, *card->u.p.txbuf_next,
1870 (unsigned long)card->rxmb, *card->u.p.rxbuf_next);
1871
1872 /* Tell timer interrupt that PPP event occurred */
1873 ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_PPP_EVENT;
1874 flags->imask |= PPP_INTR_TIMER;
1875
1876 /* If we are in PEER mode, we must first obtain the
1877 * IP information and then go into the poll routine */
1878 if (card->u.p.ip_mode != WANOPT_PPP_PEER){
1879 trigger_ppp_poll(dev);
1880 }
1881 }
1882 break;
1883
1884 case PPP_INTR_DROP_DTR: /* DTR drop timeout expired 0x40 bit 6 */
1885
1886		NEX_PRINTK(KERN_INFO "DTR Drop Timeout Interrupt\n");
1887
1888 if (card->u.p.ip_mode == WANOPT_PPP_PEER) {
1889 set_bit(0,&Read_connection_info);
1890 }
1891
1892 wanpipe_set_state(card, WAN_DISCONNECTED);
1893
1894 show_disc_cause(card, flags->disc_cause);
1895 ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_PPP_EVENT;
1896 flags->imask |= PPP_INTR_TIMER;
1897 trigger_ppp_poll(dev);
1898 break;
1899
1900 default:
1901 printk(KERN_INFO "%s: Error, Invalid PPP Event\n",card->devname);
1902 }
1903}
1904
1905
1906
1907/* TIMER INTERRUPT */
1908
1909static void timer_intr (sdla_t *card)
1910{
1911
1912 struct net_device* dev = card->wandev.dev;
1913 ppp_private_area_t* ppp_priv_area = dev->priv;
1914 ppp_flags_t *flags = card->flags;
1915
1916
1917 if (ppp_priv_area->timer_int_enabled & TMR_INT_ENABLED_CONFIG){
1918 if (!config_ppp(card)){
1919 ppp_priv_area->timer_int_enabled &=
1920 ~TMR_INT_ENABLED_CONFIG;
1921 }
1922 }
1923
1924 /* Update statistics */
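	/* update() presets update_comms_stats to 2, so the error statistics
	 * are read on two consecutive timer ticks before the UPDATE bit is
	 * cleared and update() is allowed to return. */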
1925 if (ppp_priv_area->timer_int_enabled & TMR_INT_ENABLED_UPDATE){
1926 ppp_get_err_stats(card);
1927 if(!(--ppp_priv_area->update_comms_stats)){
1928 ppp_priv_area->timer_int_enabled &=
1929 ~TMR_INT_ENABLED_UPDATE;
1930 }
1931 }
1932
1933 /* PPIPEMON UDP request */
1934
1935 if (ppp_priv_area->timer_int_enabled & TMR_INT_ENABLED_UDP){
1936 process_udp_mgmt_pkt(card,dev, ppp_priv_area);
1937 ppp_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_UDP;
1938 }
1939
1940 /* PPP Event */
1941 if (ppp_priv_area->timer_int_enabled & TMR_INT_ENABLED_PPP_EVENT){
1942
1943 if (card->wandev.state == WAN_DISCONNECTED){
1944 retrigger_comm(card);
1945 }
1946
1947		/* If the state is CONNECTING, it means that communications were
1948		 * enabled.  When the remote side enables its communications we
1949		 * should get a PPP_INTR_OPEN interrupt, so turn off polling.
1950		 */
1951
1952 else if (card->wandev.state == WAN_CONNECTING){
1953 /* Turn off the timer interrupt */
1954 ppp_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_PPP_EVENT;
1955 }
1956
1957 /* If state is connected and we are in PEER mode
1958 * poll for an IP address which will be provided by remote end.
1959 */
1960 else if ((card->wandev.state == WAN_CONNECTED &&
1961 card->u.p.ip_mode == WANOPT_PPP_PEER) &&
1962 test_bit(0,&Read_connection_info)){
1963
1964 card->state_tick = jiffies;
1965 if (read_connection_info (card)){
1966 printk(KERN_INFO "%s: Failed to read PEER IP Addresses\n",
1967 card->devname);
1968 }else{
1969 clear_bit(0,&Read_connection_info);
1970 set_bit(1,&Read_connection_info);
1971 trigger_ppp_poll(dev);
1972 }
1973 }else{
1974			//FIXME: Put the comment back in
1975 ppp_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_PPP_EVENT;
1976 }
1977
1978 }/* End of PPP_EVENT */
1979
1980
1981 /* Only disable the timer interrupt if there are no udp, statistic */
1982 /* updates or events pending */
1983 if(!ppp_priv_area->timer_int_enabled) {
1984 flags->imask &= ~PPP_INTR_TIMER;
1985 }
1986}
1987
1988
1989static int handle_IPXWAN(unsigned char *sendpacket, char *devname, unsigned char enable_IPX, unsigned long network_number, unsigned short proto)
1990{
1991 int i;
1992
1993 if( proto == htons(ETH_P_IPX) ) {
1994 //It's an IPX packet
1995 if(!enable_IPX) {
1996 //Return 1 so we don't pass it up the stack.
1997 return 1;
1998 }
1999 } else {
2000 //It's not IPX so pass it up the stack.
2001 return 0;
2002 }
2003
2004 if( sendpacket[16] == 0x90 &&
2005 sendpacket[17] == 0x04)
2006 {
2007 //It's IPXWAN
2008
2009 if( sendpacket[2] == 0x02 &&
2010 sendpacket[34] == 0x00)
2011 {
2012 //It's a timer request packet
2013 printk(KERN_INFO "%s: Received IPXWAN Timer Request packet\n",devname);
2014
2015 //Go through the routing options and answer no to every
2016 //option except Unnumbered RIP/SAP
2017 for(i = 41; sendpacket[i] == 0x00; i += 5)
2018 {
2019 //0x02 is the option for Unnumbered RIP/SAP
2020 if( sendpacket[i + 4] != 0x02)
2021 {
2022 sendpacket[i + 1] = 0;
2023 }
2024 }
2025
2026 //Skip over the extended Node ID option
2027 if( sendpacket[i] == 0x04 )
2028 {
2029 i += 8;
2030 }
2031
2032 //We also want to turn off all header compression opt.
2033 for(; sendpacket[i] == 0x80 ;)
2034 {
2035 sendpacket[i + 1] = 0;
2036 i += (sendpacket[i + 2] << 8) + (sendpacket[i + 3]) + 4;
2037 }
2038
2039 //Set the packet type to timer response
2040 sendpacket[34] = 0x01;
2041
2042 printk(KERN_INFO "%s: Sending IPXWAN Timer Response\n",devname);
2043 }
2044 else if( sendpacket[34] == 0x02 )
2045 {
2046 //This is an information request packet
2047 printk(KERN_INFO "%s: Received IPXWAN Information Request packet\n",devname);
2048
2049 //Set the packet type to information response
2050 sendpacket[34] = 0x03;
2051
2052 //Set the router name
2053 sendpacket[51] = 'P';
2054 sendpacket[52] = 'T';
2055 sendpacket[53] = 'P';
2056 sendpacket[54] = 'I';
2057 sendpacket[55] = 'P';
2058 sendpacket[56] = 'E';
2059 sendpacket[57] = '-';
2060 sendpacket[58] = CVHexToAscii(network_number >> 28);
2061 sendpacket[59] = CVHexToAscii((network_number & 0x0F000000)>> 24);
2062 sendpacket[60] = CVHexToAscii((network_number & 0x00F00000)>> 20);
2063 sendpacket[61] = CVHexToAscii((network_number & 0x000F0000)>> 16);
2064 sendpacket[62] = CVHexToAscii((network_number & 0x0000F000)>> 12);
2065 sendpacket[63] = CVHexToAscii((network_number & 0x00000F00)>> 8);
2066 sendpacket[64] = CVHexToAscii((network_number & 0x000000F0)>> 4);
2067 sendpacket[65] = CVHexToAscii(network_number & 0x0000000F);
2068 for(i = 66; i < 99; i+= 1)
2069 {
2070 sendpacket[i] = 0;
2071 }
2072
2073 printk(KERN_INFO "%s: Sending IPXWAN Information Response packet\n",devname);
2074 }
2075 else
2076 {
2077 printk(KERN_INFO "%s: Unknown IPXWAN packet!\n",devname);
2078 return 0;
2079 }
2080
2081 //Set the WNodeID to our network address
2082 sendpacket[35] = (unsigned char)(network_number >> 24);
2083 sendpacket[36] = (unsigned char)((network_number & 0x00FF0000) >> 16);
2084 sendpacket[37] = (unsigned char)((network_number & 0x0000FF00) >> 8);
2085 sendpacket[38] = (unsigned char)(network_number & 0x000000FF);
2086
2087 return 1;
2088 } else {
2089 //If we get here it's an IPX-data packet, so it'll get passed up the stack.
2090
2091 //switch the network numbers
2092 switch_net_numbers(sendpacket, network_number, 1);
2093 return 0;
2094 }
2095}
2096
2097/****** Background Polling Routines ****************************************/
2098
2099/* All polling functions are invoked by the TIMER interrupt in the wpp_isr
2100 * routine.
2101 */
2102
2103/*============================================================================
2104 * Monitor active link phase.
2105 */
2106static void process_route (sdla_t *card)
2107{
2108 ppp_flags_t *flags = card->flags;
2109 struct net_device *dev = card->wandev.dev;
2110 ppp_private_area_t *ppp_priv_area = dev->priv;
2111
2112 if ((card->u.p.ip_mode == WANOPT_PPP_PEER) &&
2113 (flags->ip_state == 0x09)){
2114
2115 /* We get ip_local from the firmware in PEER mode.
2116 * Therefore, if ip_local is 0, we failed to obtain
2117 * the remote IP address. */
2118 if (ppp_priv_area->ip_local == 0)
2119 return;
2120
2121 printk(KERN_INFO "%s: IPCP State Opened.\n", card->devname);
2122 if (read_info( card )) {
2123 printk(KERN_INFO
2124 "%s: An error occurred in IP assignment.\n",
2125 card->devname);
2126 } else {
2127 struct in_device *in_dev = dev->ip_ptr;
2128 if (in_dev != NULL ) {
2129 struct in_ifaddr *ifa = in_dev->ifa_list;
2130
2131 printk(KERN_INFO "%s: Assigned Lcl. Addr: %u.%u.%u.%u\n",
2132 card->devname, NIPQUAD(ifa->ifa_local));
2133 printk(KERN_INFO "%s: Assigned Rmt. Addr: %u.%u.%u.%u\n",
2134 card->devname, NIPQUAD(ifa->ifa_address));
2135 }else{
2136 printk(KERN_INFO
2137 "%s: Error: Failed to add a route for PPP interface %s\n",
2138 card->devname,dev->name);
2139 }
2140 }
2141 }
2142}
2143
2144/*============================================================================
2145 * Monitor physical link disconnected phase.
2146 * o if interface is up and the hold-down timeout has expired, then retry
2147 * connection.
2148 */
2149static void retrigger_comm(sdla_t *card)
2150{
2151 struct net_device *dev = card->wandev.dev;
2152
2153 if (dev && ((jiffies - card->state_tick) > HOLD_DOWN_TIME)) {
2154
2155 wanpipe_set_state(card, WAN_CONNECTING);
2156
2157 if(ppp_comm_enable(card) == CMD_OK){
2158 init_ppp_tx_rx_buff( card );
2159 }
2160 }
2161}
2162
2163/****** Miscellaneous Functions *********************************************/
2164
2165/*============================================================================
2166 * Configure S508 adapter.
2167 */
2168static int config508(struct net_device *dev, sdla_t *card)
2169{
2170 ppp508_conf_t cfg;
2171 struct in_device *in_dev = dev->ip_ptr;
2172 ppp_private_area_t *ppp_priv_area = dev->priv;
2173
2174 /* Prepare PPP configuration structure */
2175 memset(&cfg, 0, sizeof(ppp508_conf_t));
2176
2177 if (card->wandev.clocking)
2178 cfg.line_speed = card->wandev.bps;
2179
2180 if (card->wandev.interface == WANOPT_RS232)
2181 cfg.conf_flags |= INTERFACE_LEVEL_RS232;
2182
2183
2184 cfg.conf_flags |= DONT_TERMINATE_LNK_MAX_CONFIG; /*send Configure-Request packets forever*/
2185 cfg.txbuf_percent = PERCENT_TX_BUFF; /* % of Tx bufs */
2186 cfg.mtu_local = card->wandev.mtu;
2187 cfg.mtu_remote = card->wandev.mtu; /* Default */
2188 cfg.restart_tmr = TIME_BETWEEN_CONF_REQ; /* 30 = 3sec */
2189 cfg.auth_rsrt_tmr = TIME_BETWEEN_PAP_CHAP_REQ; /* 30 = 3sec */
2190 cfg.auth_wait_tmr = WAIT_PAP_CHAP_WITHOUT_REPLY; /* 300 = 30s */
2191 cfg.mdm_fail_tmr = WAIT_AFTER_DCD_CTS_LOW; /* 5 = 0.5s */
2192 cfg.dtr_drop_tmr = TIME_DCD_CTS_LOW_AFTER_LNK_DOWN; /* 10 = 1s */
2193 cfg.connect_tmout = WAIT_DCD_HIGH_AFTER_ENABLE_COMM; /* 900 = 90s */
 2194	cfg.conf_retry		= MAX_CONF_REQ_WITHOUT_REPLY;	/* 10 times */
2195 cfg.term_retry = MAX_TERM_REQ_WITHOUT_REPLY; /* 2 times */
2196 cfg.fail_retry = NUM_CONF_NAK_WITHOUT_REPLY; /* 5 times */
2197 cfg.auth_retry = NUM_AUTH_REQ_WITHOUT_REPLY; /* 10 times */
2198
2199
2200 if( !card->u.p.authenticator ) {
2201 printk(KERN_INFO "%s: Device is not configured as an authenticator\n",
2202 card->devname);
2203 cfg.auth_options = NO_AUTHENTICATION;
2204 }else{
2205 printk(KERN_INFO "%s: Device is configured as an authenticator\n",
2206 card->devname);
2207 cfg.auth_options = INBOUND_AUTH;
2208 }
2209
2210 if( ppp_priv_area->pap == WANOPT_YES){
2211 cfg.auth_options |=PAP_AUTH;
2212 printk(KERN_INFO "%s: Pap enabled\n", card->devname);
2213 }
2214 if( ppp_priv_area->chap == WANOPT_YES){
2215 cfg.auth_options |= CHAP_AUTH;
2216 printk(KERN_INFO "%s: Chap enabled\n", card->devname);
2217 }
2218
2219
2220 if (ppp_priv_area->enable_IPX == WANOPT_YES){
2221 printk(KERN_INFO "%s: Enabling IPX Protocol\n",card->devname);
2222 cfg.ipx_options = ENABLE_IPX | ROUTING_PROT_DEFAULT;
2223 }else{
2224 cfg.ipx_options = DISABLE_IPX;
2225 }
2226
2227 switch (card->u.p.ip_mode) {
2228
2229 case WANOPT_PPP_STATIC:
2230
2231 printk(KERN_INFO "%s: PPP IP Mode: STATIC\n",card->devname);
2232 cfg.ip_options = L_AND_R_IP_NO_ASSIG |
2233 ENABLE_IP;
2234 cfg.ip_local = in_dev->ifa_list->ifa_local;
2235 cfg.ip_remote = in_dev->ifa_list->ifa_address;
2236 /* Debugging code used to check that IP addresses
2237 * obtained from the kernel are correct */
2238
2239 NEX_PRINTK(KERN_INFO "Local %u.%u.%u.%u Remote %u.%u.%u.%u Name %s\n",
2240 NIPQUAD(ip_local),NIPQUAD(ip_remote), dev->name);
2241 break;
2242
2243 case WANOPT_PPP_HOST:
2244
2245 printk(KERN_INFO "%s: PPP IP Mode: HOST\n",card->devname);
2246 cfg.ip_options = L_IP_LOCAL_ASSIG |
2247 R_IP_LOCAL_ASSIG |
2248 ENABLE_IP;
2249 cfg.ip_local = in_dev->ifa_list->ifa_local;
2250 cfg.ip_remote = in_dev->ifa_list->ifa_address;
2251 /* Debugging code used to check that IP addresses
2252 * obtained from the kernel are correct */
2253 NEX_PRINTK (KERN_INFO "Local %u.%u.%u.%u Remote %u.%u.%u.%u Name %s\n",
2254 NIPQUAD(ip_local),NIPQUAD(ip_remote), dev->name);
2255
2256 break;
2257
2258 case WANOPT_PPP_PEER:
2259
2260 printk(KERN_INFO "%s: PPP IP Mode: PEER\n",card->devname);
2261 cfg.ip_options = L_IP_REMOTE_ASSIG |
2262 R_IP_REMOTE_ASSIG |
2263 ENABLE_IP;
2264 cfg.ip_local = 0x00;
2265 cfg.ip_remote = 0x00;
2266 break;
2267
2268 default:
2269 printk(KERN_INFO "%s: ERROR: Unsupported PPP Mode Selected\n",
2270 card->devname);
2271 printk(KERN_INFO "%s: PPP IP Modes: STATIC, PEER or HOST\n",
2272 card->devname);
2273 return 1;
2274 }
2275
2276 return ppp_configure(card, &cfg);
2277}
2278
2279/*============================================================================
2280 * Show disconnection cause.
2281 */
2282static void show_disc_cause(sdla_t *card, unsigned cause)
2283{
2284 if (cause & 0x0802)
2285
2286 printk(KERN_INFO "%s: link terminated by peer\n",
2287 card->devname);
2288
2289 else if (cause & 0x0004)
2290
2291 printk(KERN_INFO "%s: link terminated by user\n",
2292 card->devname);
2293
2294 else if (cause & 0x0008)
2295
2296 printk(KERN_INFO "%s: authentication failed\n", card->devname);
2297
2298 else if (cause & 0x0010)
2299
2300 printk(KERN_INFO
2301 "%s: authentication protocol negotiation failed\n",
2302 card->devname);
2303
2304 else if (cause & 0x0020)
2305
2306 printk(KERN_INFO
2307 "%s: peer's request for authentication rejected\n",
2308 card->devname);
2309
2310 else if (cause & 0x0040)
2311
2312 printk(KERN_INFO "%s: MRU option rejected by peer\n",
2313 card->devname);
2314
2315 else if (cause & 0x0080)
2316
2317 printk(KERN_INFO "%s: peer's MRU was too small\n",
2318 card->devname);
2319
2320 else if (cause & 0x0100)
2321
2322 printk(KERN_INFO "%s: failed to negotiate peer's LCP options\n",
2323 card->devname);
2324
2325 else if (cause & 0x0200)
2326
2327 printk(KERN_INFO "%s: failed to negotiate peer's IPCP options\n"
2328 , card->devname);
2329
2330 else if (cause & 0x0400)
2331
2332 printk(KERN_INFO
2333 "%s: failed to negotiate peer's IPXCP options\n",
2334 card->devname);
2335}
2336
2337/*=============================================================================
2338 * Process UDP call of type PTPIPEAB.
2339 */
2340static void process_udp_mgmt_pkt(sdla_t *card, struct net_device *dev,
2341 ppp_private_area_t *ppp_priv_area )
2342{
2343 unsigned char buf2[5];
2344 unsigned char *buf;
2345 unsigned int frames, len;
2346 struct sk_buff *new_skb;
2347 unsigned short data_length, buffer_length, real_len;
2348 unsigned long data_ptr;
2349 int udp_mgmt_req_valid = 1;
2350 ppp_mbox_t *mbox = card->mbox;
2351 struct timeval tv;
2352 int err;
2353 ppp_udp_pkt_t *ppp_udp_pkt = (ppp_udp_pkt_t*)&ppp_priv_area->udp_pkt_data;
2354
2355 memcpy(&buf2, &card->wandev.udp_port, 2 );
2356
2357
2358 if(ppp_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK) {
2359
2360 switch(ppp_udp_pkt->cblock.command) {
2361
2362 case PPIPE_GET_IBA_DATA:
2363 case PPP_READ_CONFIG:
2364 case PPP_GET_CONNECTION_INFO:
2365 case PPIPE_ROUTER_UP_TIME:
2366 case PPP_READ_STATISTICS:
2367 case PPP_READ_ERROR_STATS:
2368 case PPP_READ_PACKET_STATS:
2369 case PPP_READ_LCP_STATS:
2370 case PPP_READ_IPCP_STATS:
2371 case PPP_READ_IPXCP_STATS:
2372 case PPP_READ_PAP_STATS:
2373 case PPP_READ_CHAP_STATS:
2374 case PPP_READ_CODE_VERSION:
2375 udp_mgmt_req_valid = 1;
2376 break;
2377
2378 default:
2379 udp_mgmt_req_valid = 0;
2380 break;
2381 }
2382 }
2383
2384 if(!udp_mgmt_req_valid) {
2385
2386 /* set length to 0 */
2387 ppp_udp_pkt->cblock.length = 0x00;
2388
2389 /* set return code */
2390 ppp_udp_pkt->cblock.result = 0xCD;
2391 ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_direction_err;
2392
2393 if (net_ratelimit()){
2394 printk(KERN_INFO
2395 "%s: Warning, Illegal UDP command attempted from network: %x\n",
2396 card->devname,ppp_udp_pkt->cblock.command);
2397 }
2398 } else {
2399 /* Initialize the trace element */
2400 trace_element_t trace_element;
2401
2402 switch (ppp_udp_pkt->cblock.command){
2403
2404 /* PPIPE_ENABLE_TRACING */
2405 case PPIPE_ENABLE_TRACING:
2406 if (!card->TracingEnabled) {
2407
2408 /* OPERATE_DATALINE_MONITOR */
2409 mbox->cmd.command = PPP_DATALINE_MONITOR;
2410 mbox->cmd.length = 0x01;
2411 mbox->data[0] = ppp_udp_pkt->data[0];
2412 err = sdla_exec(mbox) ?
2413 mbox->cmd.result : CMD_TIMEOUT;
2414
2415 if (err != CMD_OK) {
2416
2417 ppp_error(card, err, mbox);
2418 card->TracingEnabled = 0;
2419
2420 /* set the return code */
2421
2422 ppp_udp_pkt->cblock.result = mbox->cmd.result;
2423 mbox->cmd.length = 0;
2424 break;
2425 }
2426
2427 sdla_peek(&card->hw, 0xC000, &buf2, 2);
2428
2429 ppp_priv_area->curr_trace_addr = 0;
2430 memcpy(&ppp_priv_area->curr_trace_addr, &buf2, 2);
2431 ppp_priv_area->start_trace_addr =
2432 ppp_priv_area->curr_trace_addr;
2433 ppp_priv_area->end_trace_addr =
2434 ppp_priv_area->start_trace_addr + END_OFFSET;
2435
2436 /* MAX_SEND_BUFFER_SIZE - 28 (IP header)
2437 - 32 (ppipemon CBLOCK) */
2438 available_buffer_space = MAX_LGTH_UDP_MGNT_PKT -
2439 sizeof(ip_pkt_t)-
2440 sizeof(udp_pkt_t)-
2441 sizeof(wp_mgmt_t)-
2442 sizeof(cblock_t);
2443 }
2444 ppp_udp_pkt->cblock.result = 0;
2445 mbox->cmd.length = 0;
2446 card->TracingEnabled = 1;
2447 break;
2448
2449 /* PPIPE_DISABLE_TRACING */
2450 case PPIPE_DISABLE_TRACING:
2451
2452 if(card->TracingEnabled) {
2453
2454 /* OPERATE_DATALINE_MONITOR */
2455 mbox->cmd.command = 0x33;
2456 mbox->cmd.length = 1;
2457 mbox->data[0] = 0x00;
2458 err = sdla_exec(mbox) ?
2459 mbox->cmd.result : CMD_TIMEOUT;
2460
2461 }
2462
2463 /*set return code*/
2464 ppp_udp_pkt->cblock.result = 0;
2465 mbox->cmd.length = 0;
2466 card->TracingEnabled = 0;
2467 break;
2468
2469 /* PPIPE_GET_TRACE_INFO */
2470 case PPIPE_GET_TRACE_INFO:
2471
2472 if(!card->TracingEnabled) {
2473 /* set return code */
2474 ppp_udp_pkt->cblock.result = 1;
2475 mbox->cmd.length = 0;
2476 }
2477
2478 buffer_length = 0;
2479
 2480			/* frames < 62, where 62 is the maximum number of
 2481			   trace information elements: there are 496 bytes
 2482			   of space in total and each trace information
 2483			   element is 8 bytes (496 / 8 = 62).
 2484			*/
2485 for ( frames=0; frames<62; frames++) {
2486
2487 trace_pkt_t *trace_pkt = (trace_pkt_t *)
2488 &ppp_udp_pkt->data[buffer_length];
2489
2490 /* Read the whole trace packet */
2491 sdla_peek(&card->hw, ppp_priv_area->curr_trace_addr,
2492 &trace_element, sizeof(trace_element_t));
2493
2494 /* no data on board so exit */
2495 if( trace_element.opp_flag == 0x00 )
2496 break;
2497
2498 data_ptr = trace_element.trace_data_ptr;
2499
2500 /* See if there is actual data on the trace buffer */
2501 if (data_ptr){
2502 data_length = trace_element.trace_length;
2503 }else{
2504 data_length = 0;
2505 ppp_udp_pkt->data[0] |= 0x02;
2506 }
2507
2508 //FIXME: Do we need this check
2509 if ((available_buffer_space - buffer_length)
2510 < (sizeof(trace_element_t)+1)){
2511
2512 /*indicate we have more frames
2513 * on board and exit
2514 */
2515 ppp_udp_pkt->data[0] |= 0x02;
2516 break;
2517 }
2518
2519 trace_pkt->status = trace_element.trace_type;
2520 trace_pkt->time_stamp = trace_element.trace_time_stamp;
2521 trace_pkt->real_length = trace_element.trace_length;
2522
2523 real_len = trace_element.trace_length;
2524
2525 if(data_ptr == 0){
2526 trace_pkt->data_avail = 0x00;
2527 }else{
2528 /* we can take it next time */
2529 if ((available_buffer_space - buffer_length)<
2530 (real_len + sizeof(trace_pkt_t))){
2531
2532 ppp_udp_pkt->data[0] |= 0x02;
2533 break;
2534 }
2535 trace_pkt->data_avail = 0x01;
2536
2537 /* get the data */
2538 sdla_peek(&card->hw, data_ptr,
2539 &trace_pkt->data,
2540 real_len);
2541 }
2542 /* zero the opp flag to
2543 show we got the frame */
2544 buf2[0] = 0x00;
2545 sdla_poke(&card->hw, ppp_priv_area->curr_trace_addr,
2546 &buf2, 1);
2547
2548 /* now move onto the next
2549 frame */
2550 ppp_priv_area->curr_trace_addr += 8;
2551
2552 /* check if we passed the last address */
2553 if ( ppp_priv_area->curr_trace_addr >=
2554 ppp_priv_area->end_trace_addr){
2555
2556 ppp_priv_area->curr_trace_addr =
2557 ppp_priv_area->start_trace_addr;
2558 }
2559
 2560				/* update buffer length and make sure it's even */
2561
2562 if ( trace_pkt->data_avail == 0x01 ) {
2563 buffer_length += real_len - 1;
2564 }
2565
2566 /* for the header */
2567 buffer_length += 8;
2568
2569 if( buffer_length & 0x0001 )
2570 buffer_length += 1;
2571 }
2572
2573 /* ok now set the total number of frames passed
2574 in the high 5 bits */
2575 ppp_udp_pkt->data[0] |= (frames << 2);
2576
2577 /* set the data length */
2578 mbox->cmd.length = buffer_length;
2579 ppp_udp_pkt->cblock.length = buffer_length;
2580
2581 /* set return code */
2582 ppp_udp_pkt->cblock.result = 0;
2583 break;
2584
2585 /* PPIPE_GET_IBA_DATA */
2586 case PPIPE_GET_IBA_DATA:
2587
2588 mbox->cmd.length = 0x09;
2589
2590 sdla_peek(&card->hw, 0xF003, &ppp_udp_pkt->data,
2591 mbox->cmd.length);
2592
2593 /* set the length of the data */
2594 ppp_udp_pkt->cblock.length = 0x09;
2595
2596 /* set return code */
2597 ppp_udp_pkt->cblock.result = 0x00;
2598 ppp_udp_pkt->cblock.result = 0;
2599 break;
2600
2601 /* PPIPE_FT1_READ_STATUS */
2602 case PPIPE_FT1_READ_STATUS:
2603 sdla_peek(&card->hw, 0xF020, &ppp_udp_pkt->data[0], 2);
2604 ppp_udp_pkt->cblock.length = mbox->cmd.length = 2;
2605 ppp_udp_pkt->cblock.result = 0;
2606 break;
2607
2608 case PPIPE_FLUSH_DRIVER_STATS:
2609 init_ppp_priv_struct( ppp_priv_area );
2610 init_global_statistics( card );
2611 mbox->cmd.length = 0;
2612 ppp_udp_pkt->cblock.result = 0;
2613 break;
2614
2615
2616 case PPIPE_ROUTER_UP_TIME:
2617
2618 do_gettimeofday( &tv );
2619 ppp_priv_area->router_up_time = tv.tv_sec -
2620 ppp_priv_area->router_start_time;
2621 *(unsigned long *)&ppp_udp_pkt->data = ppp_priv_area->router_up_time;
2622 mbox->cmd.length = 4;
2623 ppp_udp_pkt->cblock.result = 0;
2624 break;
2625
2626 /* PPIPE_DRIVER_STATISTICS */
2627 case PPIPE_DRIVER_STAT_IFSEND:
2628 memcpy(&ppp_udp_pkt->data, &ppp_priv_area->if_send_stat,
2629 sizeof(if_send_stat_t));
2630
2631
2632 ppp_udp_pkt->cblock.result = 0;
2633 ppp_udp_pkt->cblock.length = sizeof(if_send_stat_t);
2634 mbox->cmd.length = sizeof(if_send_stat_t);
2635 break;
2636
2637 case PPIPE_DRIVER_STAT_INTR:
2638 memcpy(&ppp_udp_pkt->data, &card->statistics,
2639 sizeof(global_stats_t));
2640
2641 memcpy(&ppp_udp_pkt->data+sizeof(global_stats_t),
2642 &ppp_priv_area->rx_intr_stat,
2643 sizeof(rx_intr_stat_t));
2644
2645 ppp_udp_pkt->cblock.result = 0;
2646 ppp_udp_pkt->cblock.length = sizeof(global_stats_t)+
2647 sizeof(rx_intr_stat_t);
2648 mbox->cmd.length = ppp_udp_pkt->cblock.length;
2649 break;
2650
2651 case PPIPE_DRIVER_STAT_GEN:
2652 memcpy( &ppp_udp_pkt->data,
2653 &ppp_priv_area->pipe_mgmt_stat,
2654 sizeof(pipe_mgmt_stat_t));
2655
2656 memcpy(&ppp_udp_pkt->data+sizeof(pipe_mgmt_stat_t),
2657 &card->statistics, sizeof(global_stats_t));
2658
2659 ppp_udp_pkt->cblock.result = 0;
2660 ppp_udp_pkt->cblock.length = sizeof(global_stats_t)+
2661 sizeof(rx_intr_stat_t);
2662 mbox->cmd.length = ppp_udp_pkt->cblock.length;
2663 break;
2664
2665
2666 /* FT1 MONITOR STATUS */
2667 case FT1_MONITOR_STATUS_CTRL:
2668
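			/* A sketch of the logic below, assuming rCount is the
			 * driver-wide FT1-monitor use counter declared elsewhere
			 * in this file: only the very first enable request and
			 * the very last disable request fall through to
			 * udp_dflt_cmd and reach the board; every other request
			 * is simply acknowledged with result = 0, length = 1. */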
2669 /* Enable FT1 MONITOR STATUS */
2670 if( ppp_udp_pkt->data[0] == 1) {
2671
2672 if( rCount++ != 0 ) {
2673 ppp_udp_pkt->cblock.result = 0;
2674 mbox->cmd.length = 1;
2675 break;
2676 }
2677 }
2678
2679 /* Disable FT1 MONITOR STATUS */
2680 if( ppp_udp_pkt->data[0] == 0) {
2681
2682 if( --rCount != 0) {
2683 ppp_udp_pkt->cblock.result = 0;
2684 mbox->cmd.length = 1;
2685 break;
2686 }
2687 }
2688 goto udp_dflt_cmd;
2689
 2690			/* WARNING: FIXME: This should be fixed.
 2691			 * The FT1 Status Ctrl case has no break
 2692			 * statement and falls through, so no code may be
 2693			 * inserted HERE: between the case above and default */
2694
2695 default:
2696udp_dflt_cmd:
2697
2698 /* it's a board command */
2699 mbox->cmd.command = ppp_udp_pkt->cblock.command;
2700 mbox->cmd.length = ppp_udp_pkt->cblock.length;
2701
2702 if(mbox->cmd.length) {
2703 memcpy(&mbox->data,(unsigned char *)ppp_udp_pkt->data,
2704 mbox->cmd.length);
2705 }
2706
2707 /* run the command on the board */
2708 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2709
2710 if (err != CMD_OK) {
2711
2712 ppp_error(card, err, mbox);
2713 ++ppp_priv_area->pipe_mgmt_stat.
2714 UDP_PIPE_mgmt_adptr_cmnd_timeout;
2715 break;
2716 }
2717
2718 ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_cmnd_OK;
2719
2720 /* copy the result back to our buffer */
2721 memcpy(&ppp_udp_pkt->cblock,mbox, sizeof(cblock_t));
2722
2723 if(mbox->cmd.length) {
2724 memcpy(&ppp_udp_pkt->data,&mbox->data,mbox->cmd.length);
2725 }
2726
2727 } /* end of switch */
2728 } /* end of else */
2729
2730 /* Fill UDP TTL */
2731 ppp_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
2732 len = reply_udp(ppp_priv_area->udp_pkt_data, mbox->cmd.length);
2733
2734 if (ppp_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK) {
2735
2736 /* Make sure we are not already sending */
2737 if (!test_bit(SEND_CRIT,&card->wandev.critical)){
2738 ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_passed_to_adptr;
2739 ppp_send(card,ppp_priv_area->udp_pkt_data,len,ppp_priv_area->protocol);
2740 }
2741
2742 } else {
2743
 2744		/* Pass it up the stack:
 2745		   allocate a socket buffer */
2746 if ((new_skb = dev_alloc_skb(len)) != NULL) {
2747
2748 /* copy data into new_skb */
2749
2750 buf = skb_put(new_skb, len);
2751 memcpy(buf,ppp_priv_area->udp_pkt_data, len);
2752
2753 ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_passed_to_stack;
2754
2755 /* Decapsulate packet and pass it up the protocol
2756 stack */
2757 new_skb->protocol = htons(ETH_P_IP);
2758 new_skb->dev = dev;
2759 new_skb->mac.raw = new_skb->data;
2760 netif_rx(new_skb);
2761 dev->last_rx = jiffies;
2762
2763 } else {
2764
2765 ++ppp_priv_area->pipe_mgmt_stat.UDP_PIPE_mgmt_no_socket;
2766 printk(KERN_INFO "no socket buffers available!\n");
2767 }
2768 }
2769
2770 ppp_priv_area->udp_pkt_lgth = 0;
2771
2772 return;
2773}
2774
2775/*=============================================================================
 2776 * Initialize the ppp_private_area structure.
2777 */
2778static void init_ppp_priv_struct( ppp_private_area_t *ppp_priv_area )
2779{
2780
2781 memset(&ppp_priv_area->if_send_stat, 0, sizeof(if_send_stat_t));
2782 memset(&ppp_priv_area->rx_intr_stat, 0, sizeof(rx_intr_stat_t));
2783 memset(&ppp_priv_area->pipe_mgmt_stat, 0, sizeof(pipe_mgmt_stat_t));
2784}
2785
2786/*============================================================================
2787 * Initialize Global Statistics
2788 */
2789static void init_global_statistics( sdla_t *card )
2790{
2791 memset(&card->statistics, 0, sizeof(global_stats_t));
2792}
2793
2794/*============================================================================
2795 * Initialize Receive and Transmit Buffers.
2796 */
2797static void init_ppp_tx_rx_buff( sdla_t *card )
2798{
2799 ppp508_buf_info_t* info;
2800
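	/* Note on the addressing below: on S514 cards the pointers held in
	 * the buffer-information block are plain offsets from the start of
	 * dual-port memory, so they are added to dpmbase directly; on S508
	 * cards the stored pointers are based at PPP508_MB_VECT, so that
	 * base is subtracted before adding dpmbase. */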
2801 if (card->hw.type == SDLA_S514) {
2802
2803 info = (void*)(card->hw.dpmbase + PPP514_BUF_OFFS);
2804
2805 card->u.p.txbuf_base = (void*)(card->hw.dpmbase +
2806 info->txb_ptr);
2807
2808 card->u.p.txbuf_last = (ppp_buf_ctl_t*)card->u.p.txbuf_base +
2809 (info->txb_num - 1);
2810
2811 card->u.p.rxbuf_base = (void*)(card->hw.dpmbase +
2812 info->rxb_ptr);
2813
2814 card->u.p.rxbuf_last = (ppp_buf_ctl_t*)card->u.p.rxbuf_base +
2815 (info->rxb_num - 1);
2816
2817 } else {
2818
2819 info = (void*)(card->hw.dpmbase + PPP508_BUF_OFFS);
2820
2821 card->u.p.txbuf_base = (void*)(card->hw.dpmbase +
2822 (info->txb_ptr - PPP508_MB_VECT));
2823
2824 card->u.p.txbuf_last = (ppp_buf_ctl_t*)card->u.p.txbuf_base +
2825 (info->txb_num - 1);
2826
2827 card->u.p.rxbuf_base = (void*)(card->hw.dpmbase +
2828 (info->rxb_ptr - PPP508_MB_VECT));
2829
2830 card->u.p.rxbuf_last = (ppp_buf_ctl_t*)card->u.p.rxbuf_base +
2831 (info->rxb_num - 1);
2832 }
2833
2834 card->u.p.txbuf_next = (unsigned long*)&info->txb_nxt;
2835 card->u.p.rxbuf_next = (unsigned long*)&info->rxb1_ptr;
2836
2837 card->u.p.rx_base = info->rxb_base;
2838 card->u.p.rx_top = info->rxb_end;
2839
2840 card->u.p.txbuf = card->u.p.txbuf_base;
2841 card->rxmb = card->u.p.rxbuf_base;
2842
2843}
2844
2845/*=============================================================================
 2846 * Read Connection Information (i.e. for remote IP address assignment).
 2847 * Called when the PPP interface is connected.
2848 */
2849static int read_info( sdla_t *card )
2850{
2851 struct net_device *dev = card->wandev.dev;
2852 ppp_private_area_t *ppp_priv_area = dev->priv;
2853 int err;
2854
2855 struct ifreq if_info;
2856 struct sockaddr_in *if_data1, *if_data2;
2857 mm_segment_t fs;
2858
2859 /* Set Local and remote addresses */
2860 memset(&if_info, 0, sizeof(if_info));
2861 strcpy(if_info.ifr_name, dev->name);
2862
2863
2864 fs = get_fs();
2865 set_fs(get_ds()); /* get user space block */
2866
 2867	/* Change the local and remote IP addresses of the interface.
 2868	 * This will also add the destination route.
2869 */
2870 if_data1 = (struct sockaddr_in *)&if_info.ifr_addr;
2871 if_data1->sin_addr.s_addr = ppp_priv_area->ip_local;
2872 if_data1->sin_family = AF_INET;
2873 err = devinet_ioctl( SIOCSIFADDR, &if_info );
2874 if_data2 = (struct sockaddr_in *)&if_info.ifr_dstaddr;
2875 if_data2->sin_addr.s_addr = ppp_priv_area->ip_remote;
2876 if_data2->sin_family = AF_INET;
2877 err = devinet_ioctl( SIOCSIFDSTADDR, &if_info );
2878
2879 set_fs(fs); /* restore old block */
2880
2881 if (err) {
2882 printk (KERN_INFO "%s: Adding of route failed: %i\n",
2883 card->devname,err);
2884 printk (KERN_INFO "%s: Local : %u.%u.%u.%u\n",
2885 card->devname,NIPQUAD(ppp_priv_area->ip_local));
2886 printk (KERN_INFO "%s: Remote: %u.%u.%u.%u\n",
2887 card->devname,NIPQUAD(ppp_priv_area->ip_remote));
2888 }
2889 return err;
2890}
2891
2892/*=============================================================================
2893 * Remove Dynamic Route.
2894 * Called when ppp interface disconnected.
2895 */
2896
2897static void remove_route( sdla_t *card )
2898{
2899
2900 struct net_device *dev = card->wandev.dev;
2901 long ip_addr;
2902 int err;
2903
2904 mm_segment_t fs;
2905 struct ifreq if_info;
2906 struct sockaddr_in *if_data1;
2907 struct in_device *in_dev = dev->ip_ptr;
2908 struct in_ifaddr *ifa = in_dev->ifa_list;
2909
2910 ip_addr = ifa->ifa_local;
2911
2912 /* Set Local and remote addresses */
2913 memset(&if_info, 0, sizeof(if_info));
2914 strcpy(if_info.ifr_name, dev->name);
2915
2916 fs = get_fs();
2917 set_fs(get_ds()); /* get user space block */
2918
 2919	/* Change the local IP address of the interface to 0.
2920 * This will also delete the destination route.
2921 */
2922 if_data1 = (struct sockaddr_in *)&if_info.ifr_addr;
2923 if_data1->sin_addr.s_addr = 0;
2924 if_data1->sin_family = AF_INET;
2925 err = devinet_ioctl( SIOCSIFADDR, &if_info );
2926
2927 set_fs(fs); /* restore old block */
2928
2929
2930 if (err) {
2931 printk (KERN_INFO "%s: Deleting dynamic route failed %d!\n",
2932 card->devname, err);
2933 return;
2934 }else{
 2935		printk (KERN_INFO "%s: PPP Deleting dynamic route %u.%u.%u.%u successfully\n",
2936 card->devname, NIPQUAD(ip_addr));
2937 }
2938 return;
2939}
2940
2941/*=============================================================================
 2942 * Perform the Interrupt Test by running the READ_CODE_VERSION command
 2943 * MAX_INTR_TEST_COUNTER times.
2944 */
2945static int intr_test( sdla_t *card )
2946{
2947 ppp_mbox_t *mb = card->mbox;
2948 int err,i;
2949
2950 err = ppp_set_intr_mode( card, 0x08 );
2951
2952 if (err == CMD_OK) {
2953
2954 for (i = 0; i < MAX_INTR_TEST_COUNTER; i ++) {
2955 /* Run command READ_CODE_VERSION */
2956 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
2957 mb->cmd.length = 0;
2958 mb->cmd.command = PPP_READ_CODE_VERSION;
2959 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
2960 if (err != CMD_OK)
2961 ppp_error(card, err, mb);
2962 }
2963 }
2964 else return err;
2965
2966 err = ppp_set_intr_mode( card, 0 );
2967 if (err != CMD_OK)
2968 return err;
2969
2970 return 0;
2971}
2972
2973/*==============================================================================
2974 * Determine what type of UDP call it is. DRVSTATS or PTPIPEAB ?
2975 */
2976static int udp_pkt_type( struct sk_buff *skb, sdla_t *card )
2977{
2978 unsigned char *sendpacket;
2979 unsigned char buf2[5];
2980 ppp_udp_pkt_t *ppp_udp_pkt = (ppp_udp_pkt_t *)skb->data;
2981
2982 sendpacket = skb->data;
2983 memcpy(&buf2, &card->wandev.udp_port, 2);
2984
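	/* Offsets used below: with a 20-byte IP header (ver_inet_hdr_length
	 * == 0x45) and an 8-byte UDP header, bytes 22-23 are the UDP
	 * destination port and the 8-character management signature
	 * ("PTPIPEAB" or "DRVSTATS") begins at offset 28. */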
2985 if( ppp_udp_pkt->ip_pkt.ver_inet_hdr_length == 0x45 && /* IP packet */
2986 sendpacket[9] == 0x11 && /* UDP packet */
2987 sendpacket[22] == buf2[1] && /* UDP Port */
2988 sendpacket[23] == buf2[0] &&
2989 sendpacket[36] == 0x01 ) {
2990
2991 if ( sendpacket[28] == 0x50 && /* PTPIPEAB: Signature */
2992 sendpacket[29] == 0x54 &&
2993 sendpacket[30] == 0x50 &&
2994 sendpacket[31] == 0x49 &&
2995 sendpacket[32] == 0x50 &&
2996 sendpacket[33] == 0x45 &&
2997 sendpacket[34] == 0x41 &&
2998 sendpacket[35] == 0x42 ){
2999
3000 return UDP_PTPIPE_TYPE;
3001
3002 } else if(sendpacket[28] == 0x44 && /* DRVSTATS: Signature */
3003 sendpacket[29] == 0x52 &&
3004 sendpacket[30] == 0x56 &&
3005 sendpacket[31] == 0x53 &&
3006 sendpacket[32] == 0x54 &&
3007 sendpacket[33] == 0x41 &&
3008 sendpacket[34] == 0x54 &&
3009 sendpacket[35] == 0x53 ){
3010
3011 return UDP_DRVSTATS_TYPE;
3012
3013 } else
3014 return UDP_INVALID_TYPE;
3015
3016 } else
3017 return UDP_INVALID_TYPE;
3018
3019}
3020
3021/*============================================================================
3022 * Check to see if the packet to be transmitted contains a broadcast or
3023 * multicast source IP address.
3024 */
3025
3026static int chk_bcast_mcast_addr(sdla_t *card, struct net_device* dev,
3027 struct sk_buff *skb)
3028{
3029 u32 src_ip_addr;
3030 u32 broadcast_ip_addr = 0;
3031 struct in_device *in_dev;
3032
3033 /* read the IP source address from the outgoing packet */
3034 src_ip_addr = *(u32 *)(skb->data + 12);
3035
3036 /* read the IP broadcast address for the device */
3037 in_dev = dev->ip_ptr;
3038 if(in_dev != NULL) {
3039 struct in_ifaddr *ifa= in_dev->ifa_list;
3040 if(ifa != NULL)
3041 broadcast_ip_addr = ifa->ifa_broadcast;
3042 else
3043 return 0;
3044 }
3045
3046 /* check if the IP Source Address is a Broadcast address */
3047 if((dev->flags & IFF_BROADCAST) && (src_ip_addr == broadcast_ip_addr)) {
3048 printk(KERN_INFO "%s: Broadcast Source Address silently discarded\n",
3049 card->devname);
3050 return 1;
3051 }
3052
3053 /* check if the IP Source Address is a Multicast address */
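	/* 0xE0000001 .. 0xFFFFFFFE in host order is 224.0.0.1 through
	   255.255.255.254, i.e. every class D (multicast) and class E
	   address short of the limited broadcast 255.255.255.255. */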
3054 if((ntohl(src_ip_addr) >= 0xE0000001) &&
3055 (ntohl(src_ip_addr) <= 0xFFFFFFFE)) {
3056 printk(KERN_INFO "%s: Multicast Source Address silently discarded\n",
3057 card->devname);
3058 return 1;
3059 }
3060
3061 return 0;
3062}
3063
3064void s508_lock (sdla_t *card, unsigned long *smp_flags)
3065{
3066 spin_lock_irqsave(&card->wandev.lock, *smp_flags);
3067}
3068
3069void s508_unlock (sdla_t *card, unsigned long *smp_flags)
3070{
3071 spin_unlock_irqrestore(&card->wandev.lock, *smp_flags);
3072}
3073
3074static int read_connection_info (sdla_t *card)
3075{
3076 ppp_mbox_t *mb = card->mbox;
3077 struct net_device *dev = card->wandev.dev;
3078 ppp_private_area_t *ppp_priv_area = dev->priv;
3079 ppp508_connect_info_t *ppp508_connect_info;
3080 int err;
3081
3082 memset(&mb->cmd, 0, sizeof(ppp_cmd_t));
3083 mb->cmd.length = 0;
3084 mb->cmd.command = PPP_GET_CONNECTION_INFO;
3085 err = sdla_exec(mb) ? mb->cmd.result : CMD_TIMEOUT;
3086
3087 if (err != CMD_OK) {
3088 ppp_error(card, err, mb);
3089 ppp_priv_area->ip_remote = 0;
3090 ppp_priv_area->ip_local = 0;
3091 }
3092 else {
3093 ppp508_connect_info = (ppp508_connect_info_t *)mb->data;
3094 ppp_priv_area->ip_remote = ppp508_connect_info->ip_remote;
3095 ppp_priv_area->ip_local = ppp508_connect_info->ip_local;
3096
3097 NEX_PRINTK(KERN_INFO "READ CONNECTION GOT IP ADDRESS %x, %x\n",
3098 ppp_priv_area->ip_remote,
3099 ppp_priv_area->ip_local);
3100 }
3101
3102 return err;
3103}
3104
3105/*===============================================================================
3106 * config_ppp
3107 *
3108 * Configure the ppp protocol and enable communications.
3109 *
3110 * The if_open function binds this function to the poll routine.
3111 * Therefore, this function will run every time the ppp interface
3112 * is brought up.
3113 *
3114 * If the communications are not enabled, proceed to configure
3115 * the card and enable communications.
3116 *
3117 * If the communications are enabled, it means that the interface
 3118 *	was shut down by either the user or the driver. In this case, we
3119 * have to check that the IP addresses have not changed. If
3120 * the IP addresses changed, we have to reconfigure the firmware
3121 * and update the changed IP addresses. Otherwise, just exit.
3122 */
3123static int config_ppp (sdla_t *card)
3124{
3125
3126 struct net_device *dev = card->wandev.dev;
3127 ppp_flags_t *flags = card->flags;
3128 ppp_private_area_t *ppp_priv_area = dev->priv;
3129
3130 if (card->u.p.comm_enabled){
3131
3132 if (ppp_priv_area->ip_local_tmp != ppp_priv_area->ip_local ||
3133 ppp_priv_area->ip_remote_tmp != ppp_priv_area->ip_remote){
3134
 3135			/* The IP addresses have changed; we must
 3136			 * stop communications and reconfigure
3137 * the card. Reason: the firmware must know
3138 * the local and remote IP addresses. */
3139 disable_comm(card);
3140 wanpipe_set_state(card, WAN_DISCONNECTED);
3141 printk(KERN_INFO
3142 "%s: IP addresses changed!\n",
3143 card->devname);
3144 printk(KERN_INFO "%s: Restarting communications ...\n",
3145 card->devname);
3146 }else{
3147 /* IP addresses are the same and the link is up,
3148 * we don't have to do anything here. Therefore, exit */
3149 return 0;
3150 }
3151 }
3152
 3153	/* Record the new IP addresses */
3154 ppp_priv_area->ip_local = ppp_priv_area->ip_local_tmp;
3155 ppp_priv_area->ip_remote = ppp_priv_area->ip_remote_tmp;
3156
3157 if (config508(dev, card)){
3158 printk(KERN_INFO "%s: Failed to configure PPP device\n",
3159 card->devname);
3160 return 0;
3161 }
3162
3163 if (ppp_set_intr_mode(card, PPP_INTR_RXRDY|
3164 PPP_INTR_TXRDY|
3165 PPP_INTR_MODEM|
3166 PPP_INTR_DISC |
3167 PPP_INTR_OPEN |
3168 PPP_INTR_DROP_DTR |
3169 PPP_INTR_TIMER)) {
3170
3171 printk(KERN_INFO "%s: Failed to configure board interrupts !\n",
3172 card->devname);
3173 return 0;
3174 }
3175
3176 /* Turn off the transmit and timer interrupt */
3177 flags->imask &= ~(PPP_INTR_TXRDY | PPP_INTR_TIMER) ;
3178
3179
 3180	/* If we are not the authenticator and either authentication protocol
 3181	 * is enabled, then call ppp_set_outbnd_auth().
 3182	 */
3183 if ( !card->u.p.authenticator && (ppp_priv_area->pap || ppp_priv_area->chap)) {
3184 if ( ppp_set_outbnd_auth(card, ppp_priv_area) ){
3185 printk(KERN_INFO "%s: Outbound authentication failed !\n",
3186 card->devname);
3187 return 0;
3188 }
3189 }
3190
 3191	/* If we are the authenticator and either authentication protocol is
 3192	 * enabled, then call ppp_set_inbnd_auth().
 3193	 */
3194 if (card->u.p.authenticator && (ppp_priv_area->pap || ppp_priv_area->chap)){
3195 if (ppp_set_inbnd_auth(card, ppp_priv_area)){
3196 printk(KERN_INFO "%s: Inbound authentication failed !\n",
3197 card->devname);
3198 return 0;
3199 }
3200 }
3201
3202 /* If we fail to enable communications here it's OK,
 3203	 * since the DTR timer will cause a disconnect, which
 3204	 * will retrigger communications in timer_intr() */
3205 if (ppp_comm_enable(card) == CMD_OK) {
3206 wanpipe_set_state(card, WAN_CONNECTING);
3207 init_ppp_tx_rx_buff(card);
3208 }
3209
3210 return 0;
3211}
3212
3213/*============================================================
3214 * ppp_poll
3215 *
3216 * Rationale:
 3217 *	We cannot manipulate the routing tables or
 3218 *	IP addresses within an interrupt. Therefore
 3219 *	we must perform such actions outside the interrupt,
 3220 *	at a later time.
3221 *
3222 * Description:
3223 * PPP polling routine, responsible for
3224 * shutting down interfaces upon disconnect
3225 * and adding/removing routes.
3226 *
3227 * Usage:
3228 * This function is executed for each ppp
3229 * interface through a tq_schedule bottom half.
3230 *
3231 * trigger_ppp_poll() function is used to kick
3232 * the ppp_poll routine.
3233 */
3234static void ppp_poll(struct net_device *dev)
3235{
3236 ppp_private_area_t *ppp_priv_area;
3237 sdla_t *card;
3238 u8 check_gateway=0;
3239 ppp_flags_t *flags;
3240
3241 if (!dev || (ppp_priv_area = dev->priv) == NULL)
3242 return;
3243
3244 card = ppp_priv_area->card;
3245 flags = card->flags;
3246
3247 /* Shutdown is in progress, stop what you are
3248 * doing and get out */
3249 if (test_bit(PERI_CRIT,&card->wandev.critical)){
3250 clear_bit(POLL_CRIT,&card->wandev.critical);
3251 return;
3252 }
3253
 3254	/* The if_open() function has triggered the polling routine
 3255	 * to determine the configured IP addresses. Once the
 3256	 * addresses are found, trigger the PPP configuration */
3257 if (test_bit(0,&ppp_priv_area->config_ppp)){
3258
3259 ppp_priv_area->ip_local_tmp = get_ip_address(dev,WAN_LOCAL_IP);
3260 ppp_priv_area->ip_remote_tmp = get_ip_address(dev,WAN_POINTOPOINT_IP);
3261
3262 if (ppp_priv_area->ip_local_tmp == ppp_priv_area->ip_remote_tmp &&
3263 card->u.p.ip_mode == WANOPT_PPP_HOST){
3264
3265 if (++ppp_priv_area->ip_error > MAX_IP_ERRORS){
3266 printk(KERN_INFO "\n%s: --- WARNING ---\n",
3267 card->devname);
3268 printk(KERN_INFO "%s: The local IP address is the same as the\n",
3269 card->devname);
3270 printk(KERN_INFO "%s: Point-to-Point IP address.\n",
3271 card->devname);
3272 printk(KERN_INFO "%s: --- WARNING ---\n\n",
3273 card->devname);
3274 }else{
3275 clear_bit(POLL_CRIT,&card->wandev.critical);
3276 ppp_priv_area->poll_delay_timer.expires = jiffies+HZ;
3277 add_timer(&ppp_priv_area->poll_delay_timer);
3278 return;
3279 }
3280 }
3281
3282 ppp_priv_area->timer_int_enabled |= TMR_INT_ENABLED_CONFIG;
3283 flags->imask |= PPP_INTR_TIMER;
3284 ppp_priv_area->ip_error=0;
3285
3286 clear_bit(0,&ppp_priv_area->config_ppp);
3287 clear_bit(POLL_CRIT,&card->wandev.critical);
3288 return;
3289 }
3290
3291 /* Dynamic interface implementation, as well as dynamic
3292 * routing. */
3293
3294 switch (card->wandev.state) {
3295
3296 case WAN_DISCONNECTED:
3297
 3298		/* If dynamic interface configuration is on and the interface
 3299		 * is up, then bring down the network interface */
3300
3301 if (test_bit(DYN_OPT_ON,&ppp_priv_area->interface_down) &&
3302 !test_bit(DEV_DOWN,&ppp_priv_area->interface_down) &&
3303 card->wandev.dev->flags & IFF_UP){
3304
3305 printk(KERN_INFO "%s: Interface %s down.\n",
3306 card->devname,card->wandev.dev->name);
3307 change_dev_flags(card->wandev.dev,
3308 (card->wandev.dev->flags&~IFF_UP));
3309 set_bit(DEV_DOWN,&ppp_priv_area->interface_down);
3310 }else{
3311 /* We need to check if the local IP address is
3312 * zero. If it is, we shouldn't try to remove it.
3313 * For some reason the kernel crashes badly if
3314 * we try to remove the route twice */
3315
3316 if (card->wandev.dev->flags & IFF_UP &&
3317 get_ip_address(card->wandev.dev,WAN_LOCAL_IP) &&
3318 card->u.p.ip_mode == WANOPT_PPP_PEER){
3319
3320 remove_route(card);
3321 }
3322 }
3323 break;
3324
3325 case WAN_CONNECTED:
3326
 3327		/* On an SMP machine this code can execute before the interface
3328 * comes up. In this case, we must make sure that we do not
3329 * try to bring up the interface before dev_open() is finished */
3330
3331
3332 /* DEV_DOWN will be set only when we bring down the interface
3333 * for the very first time. This way we know that it was us
3334 * that brought the interface down */
3335
3336 if (test_bit(DYN_OPT_ON,&ppp_priv_area->interface_down) &&
3337 test_bit(DEV_DOWN, &ppp_priv_area->interface_down) &&
3338 !(card->wandev.dev->flags & IFF_UP)){
3339
3340 printk(KERN_INFO "%s: Interface %s up.\n",
3341 card->devname,card->wandev.dev->name);
3342
3343 change_dev_flags(card->wandev.dev,(card->wandev.dev->flags|IFF_UP));
3344 clear_bit(DEV_DOWN,&ppp_priv_area->interface_down);
3345 check_gateway=1;
3346 }
3347
3348 if ((card->u.p.ip_mode == WANOPT_PPP_PEER) &&
3349 test_bit(1,&Read_connection_info)) {
3350
3351 process_route(card);
3352 clear_bit(1,&Read_connection_info);
3353 check_gateway=1;
3354 }
3355
3356 if (ppp_priv_area->gateway && check_gateway)
3357 add_gateway(card,dev);
3358
3359 break;
3360 }
3361 clear_bit(POLL_CRIT,&card->wandev.critical);
3362 return;
3363}
3364
3365/*============================================================
3366 * trigger_ppp_poll
3367 *
3368 * Description:
3369 * Add a ppp_poll() task into a tq_scheduler bh handler
3370 * for a specific interface. This will kick
3371 * the ppp_poll() routine at a later time.
3372 *
3373 * Usage:
 3374 *	Interrupts use this to defer a task to
3375 * a polling routine.
3376 *
3377 */
3378
3379static void trigger_ppp_poll(struct net_device *dev)
3380{
3381 ppp_private_area_t *ppp_priv_area;
3382 if ((ppp_priv_area=dev->priv) != NULL){
3383
3384 sdla_t *card = ppp_priv_area->card;
3385
3386 if (test_bit(PERI_CRIT,&card->wandev.critical)){
3387 return;
3388 }
3389
3390 if (test_and_set_bit(POLL_CRIT,&card->wandev.critical)){
3391 return;
3392 }
3393
3394 schedule_work(&ppp_priv_area->poll_work);
3395 }
3396 return;
3397}
3398
3399static void ppp_poll_delay (unsigned long dev_ptr)
3400{
3401 struct net_device *dev = (struct net_device *)dev_ptr;
3402 trigger_ppp_poll(dev);
3403}
3404
3405/*============================================================
3406 * detect_and_fix_tx_bug
3407 *
3408 * Description:
3409 * On connect, if the board tx buffer ptr is not the same
3410 * as the driver tx buffer ptr, we found a firmware bug.
 3411 *	Report the bug to the layer above. To fix the
 3412 *	error, restart communications.
3413 *
3414 * Usage:
3415 *
3416 */
3417
3418static int detect_and_fix_tx_bug (sdla_t *card)
3419{
3420 if (((unsigned long)card->u.p.txbuf_base&0xFFF) != ((*card->u.p.txbuf_next)&0xFFF)){
3421 NEX_PRINTK(KERN_INFO "Major Error, Fix the bug\n");
3422 return 1;
3423 }
3424 return 0;
3425}
3426
3427MODULE_LICENSE("GPL");
3428
3429/****** End *****************************************************************/
diff --git a/drivers/net/wan/sdla_x25.c b/drivers/net/wan/sdla_x25.c
new file mode 100644
index 000000000000..3a93d2fd4fbf
--- /dev/null
+++ b/drivers/net/wan/sdla_x25.c
@@ -0,0 +1,5496 @@
1/*****************************************************************************
2* sdla_x25.c WANPIPE(tm) Multiprotocol WAN Link Driver. X.25 module.
3*
4* Author: Nenad Corbic <ncorbic@sangoma.com>
5*
6* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
7*
8* This program is free software; you can redistribute it and/or
9* modify it under the terms of the GNU General Public License
10* as published by the Free Software Foundation; either version
11* 2 of the License, or (at your option) any later version.
12* ============================================================================
13* Apr 03, 2001 Nenad Corbic o Fixed the rx_skb=NULL bug in x25 in rx_intr().
14* Dec 26, 2000 Nenad Corbic o Added a new polling routine, that uses
15* a kernel timer (more efficient).
16* Dec 25, 2000 Nenad Corbic o Updated for 2.4.X kernel
17* Jul 26, 2000 Nenad Corbic o Increased the local packet buffering
18* for API to 4096+header_size.
19* Jul 17, 2000 Nenad Corbic o Fixed the x25 startup bug. Enable
20* communications only after all interfaces
21* come up. HIGH SVC/PVC is used to calculate
22* the number of channels.
23* Enable protocol only after all interfaces
24* are enabled.
25* Jul 10, 2000 Nenad Corbic o Fixed the M_BIT bug.
26* Apr 25, 2000 Nenad Corbic o Pass Modem messages to the API.
27* Disable idle timeout in X25 API.
28* Apr 14, 2000 Nenad Corbic o Fixed: Large LCN number support.
29* Maximum LCN number is 4095.
30* Maximum number of X25 channels is 255.
31* Apr 06, 2000 Nenad Corbic o Added SMP Support.
32* Mar 29, 2000 Nenad Corbic o Added support for S514 PCI Card
33* Mar 23, 2000 Nenad Corbic o Improved task queue, BH handling.
34* Mar 14, 2000 Nenad Corbic o Updated Protocol Violation handling
35* routines. Bug Fix.
36* Mar 10, 2000 Nenad Corbic o Bug Fix: corrupted mbox recovery.
37* Mar 09, 2000 Nenad Corbic o Fixed the auto HDLC bug.
38* Mar 08, 2000 Nenad Corbic o Fixed LAPB HDLC startup problems.
39* Application must bring the link up
40* before tx/rx, and bring the
41* link down on close().
42* Mar 06, 2000 Nenad Corbic o Added an option for logging call setup
43* information.
44* Feb 29, 2000 Nenad Corbic o Added support for LAPB HDLC API
45* Feb 25, 2000 Nenad Corbic o Fixed the modem failure handling.
46* No Modem OOB message will be passed
47* to the user.
48* Feb 21, 2000 Nenad Corbic o Added Xpipemon Debug Support
49* Dec 30, 1999 Nenad Corbic o Socket based X25API
50* Sep 17, 1998 Jaspreet Singh o Updates for 2.2.X kernel
51* Mar 15, 1998 Alan Cox o 2.1.x porting
52* Dec 19, 1997 Jaspreet Singh o Added multi-channel IPX support
53* Nov 27, 1997 Jaspreet Singh o Added protection against enabling of irqs
54* when they are disabled.
55* Nov 17, 1997 Farhan Thawar o Added IPX support
56* o Changed if_send() to now buffer packets when
57* the board is busy
58* o Removed queueing of packets via the polling
 59*				  routine
60* o Changed if_send() critical flags to properly
61* handle race conditions
62* Nov 06, 1997 Farhan Thawar o Added support for SVC timeouts
63* o Changed PVC encapsulation to ETH_P_IP
64* Jul 21, 1997 Jaspreet Singh o Fixed freeing up of buffers using kfree()
65* when packets are received.
66* Mar 11, 1997 Farhan Thawar Version 3.1.1
67* o added support for V35
68* o changed if_send() to return 0 if
69* wandev.critical() is true
70* o free socket buffer in if_send() if
71* returning 0
72* o added support for single '@' address to
73* accept all incoming calls
74* o fixed bug in set_chan_state() to disconnect
75* Jan 15, 1997 Gene Kozin Version 3.1.0
76* o implemented exec() entry point
77* Jan 07, 1997 Gene Kozin Initial version.
78*****************************************************************************/
79
80/*======================================================
81 * Includes
82 *=====================================================*/
83
84#include <linux/module.h>
85#include <linux/kernel.h> /* printk(), and other useful stuff */
86#include <linux/stddef.h> /* offsetof(), etc. */
87#include <linux/errno.h> /* return codes */
88#include <linux/string.h> /* inline memset(), etc. */
89#include <linux/ctype.h>
90#include <linux/slab.h> /* kmalloc(), kfree() */
91#include <linux/wanrouter.h> /* WAN router definitions */
92#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
93#include <linux/workqueue.h>
94#include <asm/byteorder.h> /* htons(), etc. */
95#include <asm/atomic.h>
96#include <linux/delay.h> /* Experimental delay */
97
98#include <asm/uaccess.h>
99
100#include <linux/if.h>
101#include <linux/if_arp.h>
102#include <linux/sdla_x25.h> /* X.25 firmware API definitions */
103#include <linux/if_wanpipe_common.h>
104#include <linux/if_wanpipe.h>
105
106
107/*======================================================
108 * Defines & Macros
109 *=====================================================*/
110
111
112#define CMD_OK 0 /* normal firmware return code */
113#define CMD_TIMEOUT 0xFF /* firmware command timed out */
114#define MAX_CMD_RETRY 10 /* max number of firmware retries */
115
116#define X25_CHAN_MTU 4096 /* unfragmented logical channel MTU */
117#define X25_HRDHDR_SZ 7 /* max encapsulation header size */
118#define X25_CONCT_TMOUT (90*HZ) /* link connection timeout */
119#define X25_RECON_TMOUT (10*HZ) /* link connection timeout */
 119#define X25_RECON_TMOUT	(10*HZ)		/* link reconnection timeout */
121#define HOLD_DOWN_TIME (30*HZ) /* link hold down time */
122#define MAX_BH_BUFF 10
123#define M_BIT 0x01
124
125//#define PRINT_DEBUG 1
126#ifdef PRINT_DEBUG
127#define DBG_PRINTK(format, a...) printk(format, ## a)
128#else
129#define DBG_PRINTK(format, a...)
130#endif
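/* Typical use, with a card pointer and an lcn in scope at the call site
 * (the arguments here are only an illustration):
 *
 *	DBG_PRINTK(KERN_INFO "%s: clearing call on lcn %d\n",
 *		card->devname, lcn);
 *
 * With PRINT_DEBUG undefined the call expands to nothing.
 */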
131
132#define TMR_INT_ENABLED_POLL_ACTIVE 0x01
133#define TMR_INT_ENABLED_POLL_CONNECT_ON 0x02
134#define TMR_INT_ENABLED_POLL_CONNECT_OFF 0x04
135#define TMR_INT_ENABLED_POLL_DISCONNECT 0x08
136#define TMR_INT_ENABLED_CMD_EXEC 0x10
137#define TMR_INT_ENABLED_UPDATE 0x20
138#define TMR_INT_ENABLED_UDP_PKT 0x40
139
140#define MAX_X25_ADDR_SIZE 16
141#define MAX_X25_DATA_SIZE 129
142#define MAX_X25_FACL_SIZE 110
143
144#define TRY_CMD_AGAIN 2
145#define DELAY_RESULT 1
146#define RETURN_RESULT 0
147
148#define DCD(x) (x & 0x03 ? "HIGH" : "LOW")
149#define CTS(x) (x & 0x05 ? "HIGH" : "LOW")
150
151
 152/* If defined, the driver will not write log
 153 * messages about modem status. */
154#define MODEM_NOT_LOG 1
155
156/*====================================================
157 * For IPXWAN
158 *===================================================*/
159
160#define CVHexToAscii(b) (((unsigned char)(b) > (unsigned char)9) ? ((unsigned char)'A' + ((unsigned char)(b) - (unsigned char)10)) : ((unsigned char)'0' + (unsigned char)(b)))
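
/* CVHexToAscii() maps one nibble to its ASCII hex digit: CVHexToAscii(0x3)
 * yields '3' and CVHexToAscii(0xA) yields 'A'. A 32-bit IPX network number
 * can therefore be printed one nibble at a time, most significant first
 * (buf and net below are only placeholders):
 *
 *	buf[0] = CVHexToAscii(net >> 28);
 *	buf[1] = CVHexToAscii((net & 0x0F000000) >> 24);
 *	buf[7] = CVHexToAscii(net & 0x0000000F);
 */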
161
162
163/*====================================================
164 * MEMORY DEBUGGING FUNCTION
165 *====================================================
166
167#define KMEM_SAFETYZONE 8
168
169static void * dbg_kmalloc(unsigned int size, int prio, int line) {
170 int i = 0;
171 void * v = kmalloc(size+sizeof(unsigned int)+2*KMEM_SAFETYZONE*8,prio);
172 char * c1 = v;
173 c1 += sizeof(unsigned int);
174 *((unsigned int *)v) = size;
175
176 for (i = 0; i < KMEM_SAFETYZONE; i++) {
177 c1[0] = 'D'; c1[1] = 'E'; c1[2] = 'A'; c1[3] = 'D';
178 c1[4] = 'B'; c1[5] = 'E'; c1[6] = 'E'; c1[7] = 'F';
179 c1 += 8;
180 }
181 c1 += size;
182 for (i = 0; i < KMEM_SAFETYZONE; i++) {
183 c1[0] = 'M'; c1[1] = 'U'; c1[2] = 'N'; c1[3] = 'G';
184 c1[4] = 'W'; c1[5] = 'A'; c1[6] = 'L'; c1[7] = 'L';
185 c1 += 8;
186 }
187 v = ((char *)v) + sizeof(unsigned int) + KMEM_SAFETYZONE*8;
188 printk(KERN_INFO "line %d kmalloc(%d,%d) = %p\n",line,size,prio,v);
189 return v;
190}
191static void dbg_kfree(void * v, int line) {
192 unsigned int * sp = (unsigned int *)(((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8));
193 unsigned int size = *sp;
194 char * c1 = ((char *)v) - KMEM_SAFETYZONE*8;
195 int i = 0;
196 for (i = 0; i < KMEM_SAFETYZONE; i++) {
197 if ( c1[0] != 'D' || c1[1] != 'E' || c1[2] != 'A' || c1[3] != 'D'
198 || c1[4] != 'B' || c1[5] != 'E' || c1[6] != 'E' || c1[7] != 'F') {
199 printk(KERN_INFO "kmalloced block at %p has been corrupted (underrun)!\n",v);
200 printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
201 c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
202 }
203 c1 += 8;
204 }
205 c1 += size;
206 for (i = 0; i < KMEM_SAFETYZONE; i++) {
207 if ( c1[0] != 'M' || c1[1] != 'U' || c1[2] != 'N' || c1[3] != 'G'
208 || c1[4] != 'W' || c1[5] != 'A' || c1[6] != 'L' || c1[7] != 'L'
209 ) {
210 printk(KERN_INFO "kmalloced block at %p has been corrupted (overrun):\n",v);
211 printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
212 c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
213 }
214 c1 += 8;
215 }
216 printk(KERN_INFO "line %d kfree(%p)\n",line,v);
217 v = ((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8);
218 kfree(v);
219}
220
221#define kmalloc(x,y) dbg_kmalloc(x,y,__LINE__)
222#define kfree(x) dbg_kfree(x,__LINE__)
223
224==============================================================*/
225
226
227
228/*===============================================
229 * Data Structures
230 *===============================================*/
231
232
233/*========================================================
234 * Name: x25_channel
235 *
 236 *	Purpose:	To hold private information for each
237 * logical channel.
238 *
239 * Rationale: Per-channel debugging is possible if each
240 * channel has its own private area.
241 *
242 * Assumptions:
243 *
 244 *	Description:	This is an extension of the struct net_device
245 * we create for each network interface to keep
246 * the rest of X.25 channel-specific data.
247 *
248 * Construct: Typedef
249 */
250typedef struct x25_channel
251{
252 wanpipe_common_t common; /* common area for x25api and socket */
253 char name[WAN_IFNAME_SZ+1]; /* interface name, ASCIIZ */
254 char addr[WAN_ADDRESS_SZ+1]; /* media address, ASCIIZ */
255 unsigned tx_pkt_size;
256 unsigned short protocol; /* ethertype, 0 - multiplexed */
257 char drop_sequence; /* mark sequence for dropping */
258 unsigned long state_tick; /* time of the last state change */
259 unsigned idle_timeout; /* sec, before disconnecting */
260 unsigned long i_timeout_sofar; /* # of sec's we've been idle */
261 unsigned hold_timeout; /* sec, before re-connecting */
262 unsigned long tick_counter; /* counter for transmit time out */
 263	char devtint;			/* Whether we should dev_tint() */
264 struct sk_buff* rx_skb; /* receive socket buffer */
265 struct sk_buff* tx_skb; /* transmit socket buffer */
266
267 bh_data_t *bh_head; /* Circular buffer for x25api_bh */
268 unsigned long tq_working;
269 volatile int bh_write;
270 volatile int bh_read;
271 atomic_t bh_buff_used;
272
273 sdla_t* card; /* -> owner */
 274	struct net_device *dev;		/* -> bound device */
275
276 int ch_idx;
277 unsigned char enable_IPX;
278 unsigned long network_number;
279 struct net_device_stats ifstats; /* interface statistics */
280 unsigned short transmit_length;
281 unsigned short tx_offset;
282 char transmit_buffer[X25_CHAN_MTU+sizeof(x25api_hdr_t)];
283
284 if_send_stat_t if_send_stat;
285 rx_intr_stat_t rx_intr_stat;
286 pipe_mgmt_stat_t pipe_mgmt_stat;
287
288 unsigned long router_start_time; /* Router start time in seconds */
289 unsigned long router_up_time;
290
291} x25_channel_t;
292
293/* FIXME Take this out */
294
295#ifdef NEX_OLD_CALL_INFO
296typedef struct x25_call_info
297{
298 char dest[17]; PACKED;/* ASCIIZ destination address */
299 char src[17]; PACKED;/* ASCIIZ source address */
300 char nuser; PACKED;/* number of user data bytes */
301 unsigned char user[127]; PACKED;/* user data */
302 char nfacil; PACKED;/* number of facilities */
303 struct
304 {
305 unsigned char code; PACKED;
306 unsigned char parm; PACKED;
307 } facil[64]; /* facilities */
308} x25_call_info_t;
309#else
310typedef struct x25_call_info
311{
312 char dest[MAX_X25_ADDR_SIZE] PACKED;/* ASCIIZ destination address */
313 char src[MAX_X25_ADDR_SIZE] PACKED;/* ASCIIZ source address */
314 unsigned char nuser PACKED;
315 unsigned char user[MAX_X25_DATA_SIZE] PACKED;/* user data */
316 unsigned char nfacil PACKED;
317 unsigned char facil[MAX_X25_FACL_SIZE] PACKED;
318 unsigned short lcn PACKED;
319} x25_call_info_t;
320#endif
321
322
323
324/*===============================================
325 * Private Function Prototypes
326 *==============================================*/
327
328
329/*=================================================
330 * WAN link driver entry points. These are
331 * called by the WAN router module.
332 */
333static int update(struct wan_device* wandev);
334static int new_if(struct wan_device* wandev, struct net_device* dev,
335 wanif_conf_t* conf);
336static int del_if(struct wan_device* wandev, struct net_device* dev);
337static void disable_comm (sdla_t* card);
338static void disable_comm_shutdown(sdla_t *card);
339
340
341
342/*=================================================
343 * WANPIPE-specific entry points
344 */
345static int wpx_exec (struct sdla* card, void* u_cmd, void* u_data);
346static void x25api_bh(struct net_device *dev);
347static int x25api_bh_cleanup(struct net_device *dev);
348static int bh_enqueue(struct net_device *dev, struct sk_buff *skb);
349
350
351/*=================================================
352 * Network device interface
353 */
354static int if_init(struct net_device* dev);
355static int if_open(struct net_device* dev);
356static int if_close(struct net_device* dev);
357static int if_header(struct sk_buff* skb, struct net_device* dev,
358 unsigned short type, void* daddr, void* saddr, unsigned len);
359static int if_rebuild_hdr (struct sk_buff* skb);
360static int if_send(struct sk_buff* skb, struct net_device* dev);
361static struct net_device_stats *if_stats(struct net_device* dev);
362
363static void if_tx_timeout(struct net_device *dev);
364
365/*=================================================
366 * Interrupt handlers
367 */
368static void wpx_isr (sdla_t *);
369static void rx_intr (sdla_t *);
370static void tx_intr (sdla_t *);
371static void status_intr (sdla_t *);
372static void event_intr (sdla_t *);
373static void spur_intr (sdla_t *);
374static void timer_intr (sdla_t *);
375
376static int tx_intr_send(sdla_t *card, struct net_device *dev);
377static struct net_device *move_dev_to_next(sdla_t *card,
378 struct net_device *dev);
379
380/*=================================================
381 * Background polling routines
382 */
383static void wpx_poll (sdla_t* card);
384static void poll_disconnected (sdla_t* card);
385static void poll_connecting (sdla_t* card);
386static void poll_active (sdla_t* card);
387static void trigger_x25_poll(sdla_t *card);
388static void x25_timer_routine(unsigned long data);
389
390
391
392/*=================================================
393 * X.25 firmware interface functions
394 */
395static int x25_get_version (sdla_t* card, char* str);
396static int x25_configure (sdla_t* card, TX25Config* conf);
397static int hdlc_configure (sdla_t* card, TX25Config* conf);
398static int set_hdlc_level (sdla_t* card);
399static int x25_get_err_stats (sdla_t* card);
400static int x25_get_stats (sdla_t* card);
401static int x25_set_intr_mode (sdla_t* card, int mode);
402static int x25_close_hdlc (sdla_t* card);
403static int x25_open_hdlc (sdla_t* card);
404static int x25_setup_hdlc (sdla_t* card);
405static int x25_set_dtr (sdla_t* card, int dtr);
406static int x25_get_chan_conf (sdla_t* card, x25_channel_t* chan);
407static int x25_place_call (sdla_t* card, x25_channel_t* chan);
408static int x25_accept_call (sdla_t* card, int lcn, int qdm);
409static int x25_clear_call (sdla_t* card, int lcn, int cause, int diagn);
410static int x25_send (sdla_t* card, int lcn, int qdm, int len, void* buf);
411static int x25_fetch_events (sdla_t* card);
412static int x25_error (sdla_t* card, int err, int cmd, int lcn);
413
414/*=================================================
415 * X.25 asynchronous event handlers
416 */
417static int incoming_call (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
418static int call_accepted (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
419static int call_cleared (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
420static int timeout_event (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
421static int restart_event (sdla_t* card, int cmd, int lcn, TX25Mbox* mb);
422
423
424/*=================================================
425 * Miscellaneous functions
426 */
427static int connect (sdla_t* card);
428static int disconnect (sdla_t* card);
429static struct net_device* get_dev_by_lcn(struct wan_device* wandev,
430 unsigned lcn);
431static int chan_connect(struct net_device* dev);
432static int chan_disc(struct net_device* dev);
433static void set_chan_state(struct net_device* dev, int state);
434static int chan_send(struct net_device *dev, void* buff, unsigned data_len,
435 unsigned char tx_intr);
436static unsigned char bps_to_speed_code (unsigned long bps);
437static unsigned int dec_to_uint (unsigned char* str, int len);
438static unsigned int hex_to_uint (unsigned char*, int);
439static void parse_call_info (unsigned char*, x25_call_info_t*);
440static struct net_device *find_channel(sdla_t *card, unsigned lcn);
441static void bind_lcn_to_dev(sdla_t *card, struct net_device *dev, unsigned lcn);
442static void setup_for_delayed_transmit(struct net_device *dev,
443 void *buf, unsigned len);
444
445
446/*=================================================
447 * X25 API Functions
448 */
449static int wanpipe_pull_data_in_skb(sdla_t *card, struct net_device *dev,
450 struct sk_buff **);
451static void timer_intr_exec(sdla_t *, unsigned char);
452static int execute_delayed_cmd(sdla_t *card, struct net_device *dev,
453 mbox_cmd_t *usr_cmd, char bad_cmd);
454static int api_incoming_call (sdla_t*, TX25Mbox *, int);
455static int alloc_and_init_skb_buf (sdla_t *,struct sk_buff **, int);
456static void send_delayed_cmd_result(sdla_t *card, struct net_device *dev,
457 TX25Mbox* mbox);
458static int clear_confirm_event (sdla_t *, TX25Mbox*);
459static void send_oob_msg (sdla_t *card, struct net_device *dev, TX25Mbox *mbox);
460static int timer_intr_cmd_exec(sdla_t *card);
461static void api_oob_event (sdla_t *card,TX25Mbox *mbox);
462static int check_bad_command(sdla_t *card, struct net_device *dev);
463static int channel_disconnect(sdla_t* card, struct net_device *dev);
464static void hdlc_link_down (sdla_t*);
465
466/*=================================================
467 * XPIPEMON Functions
468 */
469static int process_udp_mgmt_pkt(sdla_t *);
470static int udp_pkt_type( struct sk_buff *, sdla_t*);
471static int reply_udp( unsigned char *, unsigned int);
472static void init_x25_channel_struct( x25_channel_t *);
473static void init_global_statistics( sdla_t *);
474static int store_udp_mgmt_pkt(int udp_type, char udp_pkt_src, sdla_t *card,
475 struct net_device *dev,
476 struct sk_buff *skb, int lcn);
477static unsigned short calc_checksum (char *, int);
478
479
480
481/*=================================================
482 * IPX functions
483 */
484static void switch_net_numbers(unsigned char *, unsigned long, unsigned char);
485static int handle_IPXWAN(unsigned char *, char *, unsigned char ,
486 unsigned long , unsigned short );
487
488extern void disable_irq(unsigned int);
489extern void enable_irq(unsigned int);
490
491static void S508_S514_lock(sdla_t *, unsigned long *);
492static void S508_S514_unlock(sdla_t *, unsigned long *);
493
494
495/*=================================================
496 * Global Variables
497 *=================================================*/
498
499
500
501/*=================================================
502 * Public Functions
503 *=================================================*/
504
505
506
507
508/*===================================================================
509 * wpx_init: X.25 Protocol Initialization routine.
510 *
511 * Purpose: To initialize the protocol/firmware.
512 *
513 * Rationale: This function is called by setup() function, in
514 * sdlamain.c, to dynamically setup the x25 protocol.
515 * This is the first protocol specific function, which
516 * executes once on startup.
517 *
518 * Description: This procedure initializes the x25 firmware and
519 * sets up the mailbox, transmit and receive buffer
520 * pointers. It also initializes all debugging structures
521 * and sets up the X25 environment.
522 *
523 * Sets up hardware options defined by user in [wanpipe#]
524 * section of wanpipe#.conf configuration file.
525 *
526 * At this point adapter is completely initialized
527 * and X.25 firmware is running.
528 * o read firmware version (to make sure it's alive)
529 * o configure adapter
530 * o initialize protocol-specific fields of the
531 * adapter data space.
532 *
533 * Called by: setup() function in sdlamain.c
534 *
535 * Assumptions: None
536 *
537 * Warnings: None
538 *
539 * Return: 0 o.k.
540 * < 0 failure.
541 */
542
543int wpx_init (sdla_t* card, wandev_conf_t* conf)
544{
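	/* Scratch area: used first for the firmware version string,
	 * then for the TX25Config block passed to the adapter. */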
545 union{
546 char str[80];
547 TX25Config cfg;
548 } u;
549
550 /* Verify configuration ID */
551 if (conf->config_id != WANCONFIG_X25){
552 printk(KERN_INFO "%s: invalid configuration ID %u!\n",
553			card->devname, conf->config_id);
555 return -EINVAL;
556 }
557
558 /* Initialize protocol-specific fields */
559 card->mbox = (void*)(card->hw.dpmbase + X25_MBOX_OFFS);
560 card->rxmb = (void*)(card->hw.dpmbase + X25_RXMBOX_OFFS);
561 card->flags = (void*)(card->hw.dpmbase + X25_STATUS_OFFS);
562
563 /* Initialize for S514 Card */
564 if(card->hw.type == SDLA_S514) {
565 card->mbox += X25_MB_VECTOR;
566 card->flags += X25_MB_VECTOR;
567 card->rxmb += X25_MB_VECTOR;
568 }
569
570
571 /* Read firmware version. Note that when adapter initializes, it
572 * clears the mailbox, so it may appear that the first command was
573 * executed successfully when in fact it was merely erased. To work
574 * around this, we execute the first command twice.
575 */
576 if (x25_get_version(card, NULL) || x25_get_version(card, u.str))
577 return -EIO;
578
579
580	/* X25 firmware can run either in X25 or LAPB HDLC mode.
581	 * Check the user-defined option and configure accordingly */
582 if (conf->u.x25.LAPB_hdlc_only == WANOPT_YES){
583 if (set_hdlc_level(card) != CMD_OK){
584 return -EIO;
585 }else{
586 printk(KERN_INFO "%s: running LAP_B HDLC firmware v%s\n",
587 card->devname, u.str);
588 }
589 card->u.x.LAPB_hdlc = 1;
590 }else{
591 printk(KERN_INFO "%s: running X.25 firmware v%s\n",
592 card->devname, u.str);
593 card->u.x.LAPB_hdlc = 0;
594 }
595
596	/* Configure adapter. Here we set reasonable defaults, then parse
597 * device configuration structure and set configuration options.
598 * Most configuration options are verified and corrected (if
599 * necessary) since we can't rely on the adapter to do so.
600 */
601 memset(&u.cfg, 0, sizeof(u.cfg));
602 u.cfg.t1 = 3;
603 u.cfg.n2 = 10;
604 u.cfg.autoHdlc = 1; /* automatic HDLC connection */
605 u.cfg.hdlcWindow = 7;
606 u.cfg.pktWindow = 2;
607 u.cfg.station = 1; /* DTE */
608 u.cfg.options = 0x0090; /* disable D-bit pragmatics */
609 u.cfg.ccittCompat = 1988;
610 u.cfg.t10t20 = 30;
611 u.cfg.t11t21 = 30;
612 u.cfg.t12t22 = 30;
613 u.cfg.t13t23 = 30;
614 u.cfg.t16t26 = 30;
615 u.cfg.t28 = 30;
616 u.cfg.r10r20 = 5;
617 u.cfg.r12r22 = 5;
618 u.cfg.r13r23 = 5;
619 u.cfg.responseOpt = 1; /* RR's after every packet */
620
621 if (card->u.x.LAPB_hdlc){
622 u.cfg.hdlcMTU = 1027;
623 }
624
625 if (conf->u.x25.x25_conf_opt){
626 u.cfg.options = conf->u.x25.x25_conf_opt;
627 }
628
629 if (conf->clocking != WANOPT_EXTERNAL)
630 u.cfg.baudRate = bps_to_speed_code(conf->bps);
631
632 if (conf->station != WANOPT_DTE){
633 u.cfg.station = 0; /* DCE mode */
634 }
635
636 if (conf->interface != WANOPT_RS232 ){
637 u.cfg.hdlcOptions |= 0x80; /* V35 mode */
638 }
639
640	/* Adjust MTU: round down to the nearest supported size (1024, 512, 256, 128 or 64) */
641 if (!conf->mtu || (conf->mtu >= 1024))
642 card->wandev.mtu = 1024;
643 else if (conf->mtu >= 512)
644 card->wandev.mtu = 512;
645 else if (conf->mtu >= 256)
646 card->wandev.mtu = 256;
647 else if (conf->mtu >= 128)
648 card->wandev.mtu = 128;
649 else
650 card->wandev.mtu = 64;
651
652 u.cfg.defPktSize = u.cfg.pktMTU = card->wandev.mtu;
653
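	/* Clamp the configured PVC/SVC LCN ranges to the firmware maximum
	 * (MAX_LCN_NUM) and keep the low bound at or below the high bound. */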
654 if (conf->u.x25.hi_pvc){
655 card->u.x.hi_pvc = min_t(unsigned int, conf->u.x25.hi_pvc, MAX_LCN_NUM);
656 card->u.x.lo_pvc = min_t(unsigned int, conf->u.x25.lo_pvc, card->u.x.hi_pvc);
657 }
658
659 if (conf->u.x25.hi_svc){
660 card->u.x.hi_svc = min_t(unsigned int, conf->u.x25.hi_svc, MAX_LCN_NUM);
661 card->u.x.lo_svc = min_t(unsigned int, conf->u.x25.lo_svc, card->u.x.hi_svc);
662 }
663
664 /* Figure out the total number of channels to configure */
665 card->u.x.num_of_ch = 0;
666 if (card->u.x.hi_svc != 0){
667 card->u.x.num_of_ch = (card->u.x.hi_svc - card->u.x.lo_svc) + 1;
668 }
669 if (card->u.x.hi_pvc != 0){
670 card->u.x.num_of_ch += (card->u.x.hi_pvc - card->u.x.lo_pvc) + 1;
671 }
672
673 if (card->u.x.num_of_ch == 0){
674 printk(KERN_INFO "%s: ERROR, Minimum number of PVC/SVC channels is 1 !\n"
675 "%s: Please set the Lowest/Highest PVC/SVC values !\n",
676 card->devname,card->devname);
677 return -ECHRNG;
678 }
679
680 u.cfg.loPVC = card->u.x.lo_pvc;
681 u.cfg.hiPVC = card->u.x.hi_pvc;
682 u.cfg.loTwoWaySVC = card->u.x.lo_svc;
683 u.cfg.hiTwoWaySVC = card->u.x.hi_svc;
684
685 if (conf->u.x25.hdlc_window)
686 u.cfg.hdlcWindow = min_t(unsigned int, conf->u.x25.hdlc_window, 7);
687 if (conf->u.x25.pkt_window)
688 u.cfg.pktWindow = min_t(unsigned int, conf->u.x25.pkt_window, 7);
689
690 if (conf->u.x25.t1)
691 u.cfg.t1 = min_t(unsigned int, conf->u.x25.t1, 30);
692 if (conf->u.x25.t2)
693 u.cfg.t2 = min_t(unsigned int, conf->u.x25.t2, 29);
694 if (conf->u.x25.t4)
695 u.cfg.t4 = min_t(unsigned int, conf->u.x25.t4, 240);
696 if (conf->u.x25.n2)
697 u.cfg.n2 = min_t(unsigned int, conf->u.x25.n2, 30);
698
699 if (conf->u.x25.t10_t20)
700 u.cfg.t10t20 = min_t(unsigned int, conf->u.x25.t10_t20,255);
701 if (conf->u.x25.t11_t21)
702 u.cfg.t11t21 = min_t(unsigned int, conf->u.x25.t11_t21,255);
703 if (conf->u.x25.t12_t22)
704 u.cfg.t12t22 = min_t(unsigned int, conf->u.x25.t12_t22,255);
705 if (conf->u.x25.t13_t23)
706 u.cfg.t13t23 = min_t(unsigned int, conf->u.x25.t13_t23,255);
707 if (conf->u.x25.t16_t26)
708 u.cfg.t16t26 = min_t(unsigned int, conf->u.x25.t16_t26, 255);
709 if (conf->u.x25.t28)
710 u.cfg.t28 = min_t(unsigned int, conf->u.x25.t28, 255);
711
712 if (conf->u.x25.r10_r20)
713 u.cfg.r10r20 = min_t(unsigned int, conf->u.x25.r10_r20,250);
714 if (conf->u.x25.r12_r22)
715 u.cfg.r12r22 = min_t(unsigned int, conf->u.x25.r12_r22,250);
716 if (conf->u.x25.r13_r23)
717 u.cfg.r13r23 = min_t(unsigned int, conf->u.x25.r13_r23,250);
718
719
720 if (conf->u.x25.ccitt_compat)
721 u.cfg.ccittCompat = conf->u.x25.ccitt_compat;
722
723 /* initialize adapter */
724 if (card->u.x.LAPB_hdlc){
725 if (hdlc_configure(card, &u.cfg) != CMD_OK)
726 return -EIO;
727 }else{
728 if (x25_configure(card, &u.cfg) != CMD_OK)
729 return -EIO;
730 }
731
732 if ((x25_close_hdlc(card) != CMD_OK) || /* close HDLC link */
733 (x25_set_dtr(card, 0) != CMD_OK)) /* drop DTR */
734 return -EIO;
735
736 /* Initialize protocol-specific fields of adapter data space */
737 card->wandev.bps = conf->bps;
738 card->wandev.interface = conf->interface;
739 card->wandev.clocking = conf->clocking;
740 card->wandev.station = conf->station;
741 card->isr = &wpx_isr;
742 card->poll = NULL; //&wpx_poll;
743 card->disable_comm = &disable_comm;
744 card->exec = &wpx_exec;
745 card->wandev.update = &update;
746 card->wandev.new_if = &new_if;
747 card->wandev.del_if = &del_if;
748
749 /* WARNING: This function cannot exit with an error
750 * after the change of state */
751 card->wandev.state = WAN_DISCONNECTED;
752
753 card->wandev.enable_tx_int = 0;
754 card->irq_dis_if_send_count = 0;
755 card->irq_dis_poll_count = 0;
756 card->u.x.tx_dev = NULL;
757 card->u.x.no_dev = 0;
758
759
760 /* Configure for S514 PCI Card */
761 if (card->hw.type == SDLA_S514) {
762 card->u.x.hdlc_buf_status =
763 (volatile unsigned char *)
764 (card->hw.dpmbase + X25_MB_VECTOR+ X25_MISC_HDLC_BITS);
765 }else{
766 card->u.x.hdlc_buf_status =
767 (volatile unsigned char *)(card->hw.dpmbase + X25_MISC_HDLC_BITS);
768 }
769
770 card->u.x.poll_device=NULL;
771 card->wandev.udp_port = conf->udp_port;
772
773 /* Enable or disable call setup logging */
774 if (conf->u.x25.logging == WANOPT_YES){
775 printk(KERN_INFO "%s: Enabling Call Logging.\n",
776 card->devname);
777 card->u.x.logging = 1;
778 }else{
779 card->u.x.logging = 0;
780 }
781
782 /* Enable or disable modem status reporting */
783 if (conf->u.x25.oob_on_modem == WANOPT_YES){
784 printk(KERN_INFO "%s: Enabling OOB on Modem change.\n",
785 card->devname);
786 card->u.x.oob_on_modem = 1;
787 }else{
788 card->u.x.oob_on_modem = 0;
789 }
790
791 init_global_statistics(card);
792
793 INIT_WORK(&card->u.x.x25_poll_work, (void *)wpx_poll, card);
794
795 init_timer(&card->u.x.x25_timer);
796 card->u.x.x25_timer.data = (unsigned long)card;
797 card->u.x.x25_timer.function = x25_timer_routine;
798
799 return 0;
800}
801
802/*=========================================================
803 * WAN Device Driver Entry Points
804 *========================================================*/
805
806/*============================================================
807 * Name: update(), Update device status & statistics.
808 *
809 *	Purpose:	To provide debugging and statistical
810 * information to the /proc file system.
811 * /proc/net/wanrouter/wanpipe#
812 *
813 * Rationale: The /proc file system is used to collect
814 * information about the kernel and drivers.
815 * Using the /proc file system the user
816 * can see exactly what the sangoma drivers are
817 *			doing and what state they are in.
818 *
819 * Description: Collect all driver statistical information
820 *			and pass it to the top layer.
821 *
822 *			Since we have to execute a debugging command
823 *			to obtain firmware statistics, we trigger an
824 *			UPDATE function within the timer interrupt.
825 * We wait until the timer update is complete.
826 *			Once complete, return the appropriate return
827 * code to indicate that the update was successful.
828 *
829 * Called by: device_stat() in wanmain.c
830 *
831 * Assumptions:
832 *
833 * Warnings: This function will degrade the performance
834 * of the router, since it uses the mailbox.
835 *
836 * Return: 0 OK
837 * <0 Failed (or busy).
838 */
839
840static int update(struct wan_device* wandev)
841{
842 volatile sdla_t* card;
843 TX25Status* status;
844 unsigned long timeout;
845
846 /* sanity checks */
847 if ((wandev == NULL) || (wandev->private == NULL))
848 return -EFAULT;
849
850 if (wandev->state == WAN_UNCONFIGURED)
851 return -ENODEV;
852
853 if (test_bit(SEND_CRIT, (void*)&wandev->critical))
854 return -EAGAIN;
855
856 if (!wandev->dev)
857 return -ENODEV;
858
859 card = wandev->private;
860 status = card->flags;
861
862 card->u.x.timer_int_enabled |= TMR_INT_ENABLED_UPDATE;
863 status->imask |= INTR_ON_TIMER;
864 timeout = jiffies;
865
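	/* Busy-wait (for up to one second) for the timer interrupt routine
	 * to clear the UPDATE flag once the firmware statistics are read. */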
866 for (;;){
867 if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_UPDATE)){
868 break;
869 }
870 if ((jiffies-timeout) > 1*HZ){
871 card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE;
872 return -EAGAIN;
873 }
874 }
875 return 0;
876}
877
878
879/*===================================================================
880 * Name: new_if
881 *
882 * Purpose: To allocate and initialize resources for a
883 * new logical channel.
884 *
885 * Rationale: A new channel can be added dynamically via
886 * ioctl call.
887 *
888 * Description: Allocate a private channel structure, x25_channel_t.
889 * Parse the user interface options from wanpipe#.conf
890 * configuration file.
891 * Bind the private are into the network device private
892 * area pointer (dev->priv).
893 * Prepare the network device structure for registration.
894 *
895 * Called by: ROUTER_IFNEW Ioctl call, from wanrouter_ioctl()
896 * (wanmain.c)
897 *
898 * Assumptions: None
899 *
900 * Warnings: None
901 *
902 * Return: 0 Ok
903 * <0 Failed (channel will not be created)
904 */
905static int new_if(struct wan_device* wandev, struct net_device* dev,
906 wanif_conf_t* conf)
907{
908 sdla_t* card = wandev->private;
909 x25_channel_t* chan;
910 int err = 0;
911
912 if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)){
913 printk(KERN_INFO "%s: invalid interface name!\n",
914 card->devname);
915 return -EINVAL;
916 }
917
918 if(card->wandev.new_if_cnt++ > 0 && card->u.x.LAPB_hdlc) {
919 printk(KERN_INFO "%s: Error: Running LAPB HDLC Mode !\n",
920 card->devname);
921 printk(KERN_INFO
922 "%s: Maximum number of network interfaces must be one !\n",
923 card->devname);
924 return -EEXIST;
925 }
926
927 /* allocate and initialize private data */
928 chan = kmalloc(sizeof(x25_channel_t), GFP_ATOMIC);
929 if (chan == NULL){
930 return -ENOMEM;
931 }
932
933 memset(chan, 0, sizeof(x25_channel_t));
934
935 /* Bug Fix: Seg Err on PVC startup
936	 * dev->priv must be set here since bind_lcn_to_dev,
937	 * called below, expects it */
938 dev->priv = chan;
939
940 strcpy(chan->name, conf->name);
941 chan->card = card;
942 chan->dev = dev;
943 chan->common.sk = NULL;
944 chan->common.func = NULL;
945 chan->common.rw_bind = 0;
946 chan->tx_skb = chan->rx_skb = NULL;
947
948 /* verify media address */
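	/* A leading '@' marks an SVC (the remote address follows);
	 * a leading digit marks a PVC and is parsed as its LCN. */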
949 if (conf->addr[0] == '@'){ /* SVC */
950 chan->common.svc = 1;
951 strncpy(chan->addr, &conf->addr[1], WAN_ADDRESS_SZ);
952
953 /* Set channel timeouts (default if not specified) */
954 chan->idle_timeout = (conf->idle_timeout) ?
955 conf->idle_timeout : 90;
956 chan->hold_timeout = (conf->hold_timeout) ?
957 conf->hold_timeout : 10;
958
959 }else if (is_digit(conf->addr[0])){ /* PVC */
960 int lcn = dec_to_uint(conf->addr, 0);
961
962 if ((lcn >= card->u.x.lo_pvc) && (lcn <= card->u.x.hi_pvc)){
963 bind_lcn_to_dev (card, dev, lcn);
964 }else{
965 printk(KERN_ERR
966 "%s: PVC %u is out of range on interface %s!\n",
967 wandev->name, lcn, chan->name);
968 err = -EINVAL;
969 }
970 }else{
971 printk(KERN_ERR
972 "%s: invalid media address on interface %s!\n",
973 wandev->name, chan->name);
974 err = -EINVAL;
975 }
976
977 if(strcmp(conf->usedby, "WANPIPE") == 0){
978 printk(KERN_INFO "%s: Running in WANPIPE mode %s\n",
979 wandev->name, chan->name);
980 chan->common.usedby = WANPIPE;
981 chan->protocol = htons(ETH_P_IP);
982
983 }else if(strcmp(conf->usedby, "API") == 0){
984 chan->common.usedby = API;
985 printk(KERN_INFO "%s: Running in API mode %s\n",
986 wandev->name, chan->name);
987 chan->protocol = htons(X25_PROT);
988 }
989
990
991 if (err){
992 kfree(chan);
993 dev->priv = NULL;
994 return err;
995 }
996
997 chan->enable_IPX = conf->enable_IPX;
998
999 if (chan->enable_IPX)
1000 chan->protocol = htons(ETH_P_IPX);
1001
1002 if (conf->network_number)
1003 chan->network_number = conf->network_number;
1004 else
1005 chan->network_number = 0xDEADBEEF;
1006
1007 /* prepare network device data space for registration */
1008 strcpy(dev->name,chan->name);
1009
1010 dev->init = &if_init;
1011
1012 init_x25_channel_struct(chan);
1013
1014 return 0;
1015}
1016
1017/*===================================================================
1018 * Name: del_if(), Remove a logical channel.
1019 *
1020 * Purpose: To dynamically remove a logical channel.
1021 *
1022 * Rationale: Each logical channel should be dynamically
1023 *			removable. This function is called by an
1024 * IOCTL_IFDEL ioctl call or shutdown().
1025 *
1026 * Description: Do nothing.
1027 *
1028 * Called by: IOCTL_IFDEL : wanrouter_ioctl() from wanmain.c
1029 * shutdown() from sdlamain.c
1030 *
1031 * Assumptions:
1032 *
1033 * Warnings:
1034 *
1035 * Return: 0 Ok. Void function.
1036 */
1037
1038//FIXME Del IF Should be taken out now.
1039
1040static int del_if(struct wan_device* wandev, struct net_device* dev)
1041{
1042 return 0;
1043}
1044
1045
1046/*============================================================
1047 * Name: wpx_exec
1048 *
1049 * Description: Execute adapter interface command.
1050 *		This option is currently disabled.
1051 *===========================================================*/
1052
1053static int wpx_exec (struct sdla* card, void* u_cmd, void* u_data)
1054{
1055 return 0;
1056}
1057
1058/*============================================================
1059 * Name: disable_comm
1060 *
1061 * Description: Disable communications during shutdown.
1062 *		Don't check the return code because there is
1063 * nothing we can do about it.
1064 *
1065 * Warning: Dev and private areas are gone at this point.
1066 *===========================================================*/
1067
1068static void disable_comm(sdla_t* card)
1069{
1070 disable_comm_shutdown(card);
1071 del_timer(&card->u.x.x25_timer);
1072 return;
1073}
1074
1075
1076/*============================================================
1077 * Network Device Interface
1078 *===========================================================*/
1079
1080/*===================================================================
1081 * Name:	if_init(), Network Interface Initialization
1082 *
1083 * Purpose: To initialize a network interface device structure.
1084 *
1085 * Rationale: During network interface startup, the if_init
1086 * is called by the kernel to initialize the
1087 *			network device structure. Thus a driver
1088 *			can customize a network device.
1089 *
1090 * Description:	Initialize the network device callback
1091 * routines. This is where we tell the kernel
1092 * which function to use when it wants to send
1093 * via our interface.
1094 * Furthermore, we initialize the device flags,
1095 * MTU and physical address of the board.
1096 *
1097 * Called by: Kernel (/usr/src/linux/net/core/dev.c)
1098 * (dev->init())
1099 *
1100 * Assumptions: None
1101 *
1102 * Warnings: None
1103 *
1104 * Return: 0 Ok : Void function.
1105 */
1106static int if_init(struct net_device* dev)
1107{
1108 x25_channel_t* chan = dev->priv;
1109 sdla_t* card = chan->card;
1110 struct wan_device* wandev = &card->wandev;
1111
1112 /* Initialize device driver entry points */
1113 dev->open = &if_open;
1114 dev->stop = &if_close;
1115 dev->hard_header = &if_header;
1116 dev->rebuild_header = &if_rebuild_hdr;
1117 dev->hard_start_xmit = &if_send;
1118 dev->get_stats = &if_stats;
1119 dev->tx_timeout = &if_tx_timeout;
1120 dev->watchdog_timeo = TX_TIMEOUT;
1121
1122 /* Initialize media-specific parameters */
1123 dev->type = ARPHRD_PPP; /* ARP h/w type */
1124 dev->flags |= IFF_POINTOPOINT;
1125 dev->flags |= IFF_NOARP;
1126
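	/* In API mode the MTU also covers the x25api header that
	 * accompanies each data packet. */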
1127 if (chan->common.usedby == API){
1128 dev->mtu = X25_CHAN_MTU+sizeof(x25api_hdr_t);
1129 }else{
1130 dev->mtu = card->wandev.mtu;
1131 }
1132
1133 dev->hard_header_len = X25_HRDHDR_SZ; /* media header length */
1134 dev->addr_len = 2; /* hardware address length */
1135
1136 if (!chan->common.svc){
1137 *(unsigned short*)dev->dev_addr = htons(chan->common.lcn);
1138 }
1139
1140 /* Initialize hardware parameters (just for reference) */
1141 dev->irq = wandev->irq;
1142 dev->dma = wandev->dma;
1143 dev->base_addr = wandev->ioport;
1144 dev->mem_start = (unsigned long)wandev->maddr;
1145 dev->mem_end = wandev->maddr + wandev->msize - 1;
1146
1147 /* Set transmit buffer queue length */
1148 dev->tx_queue_len = 100;
1149 SET_MODULE_OWNER(dev);
1150
1151 /* FIXME Why are we doing this */
1152 set_chan_state(dev, WAN_DISCONNECTED);
1153 return 0;
1154}
1155
1156
1157/*===================================================================
1158 * Name:	if_open(), Open/Bring up the Network Interface
1159 *
1160 * Purpose: To bring up a network interface.
1161 *
1162 * Rationale:
1163 *
1164 * Description: Open network interface.
1165 * o prevent module from unloading by incrementing use count
1166 * o if link is disconnected then initiate connection
1167 *
1168 * Called by: Kernel (/usr/src/linux/net/core/dev.c)
1169 * (dev->open())
1170 *
1171 * Assumptions: None
1172 *
1173 * Warnings: None
1174 *
1175 * Return: 0 Ok
1176 * <0 Failure: Interface will not come up.
1177 */
1178
1179static int if_open(struct net_device* dev)
1180{
1181 x25_channel_t* chan = dev->priv;
1182 sdla_t* card = chan->card;
1183 struct timeval tv;
1184 unsigned long smp_flags;
1185
1186 if (netif_running(dev))
1187 return -EBUSY;
1188
1189 chan->tq_working = 0;
1190
1191 /* Initialize the workqueue */
1192 INIT_WORK(&chan->common.wanpipe_work, (void *)x25api_bh, dev);
1193
1194 /* Allocate and initialize BH circular buffer */
1195	/* Add 1 to MAX_BH_BUFF so we don't have to test with (MAX_BH_BUFF-1) */
1196 chan->bh_head = kmalloc((sizeof(bh_data_t)*(MAX_BH_BUFF+1)),GFP_ATOMIC);
1197
1198 if (chan->bh_head == NULL){
1199 printk(KERN_INFO "%s: ERROR, failed to allocate memory ! BH_BUFFERS !\n",
1200 card->devname);
1201
1202 return -ENOBUFS;
1203 }
1204 memset(chan->bh_head,0,(sizeof(bh_data_t)*(MAX_BH_BUFF+1)));
1205 atomic_set(&chan->bh_buff_used, 0);
1206
1207 /* Increment the number of interfaces */
1208 ++card->u.x.no_dev;
1209
1210 wanpipe_open(card);
1211
1212 /* LAPB protocol only uses one interface, thus
1213 * start the protocol after it comes up. */
1214 if (card->u.x.LAPB_hdlc){
1215 if (card->open_cnt == 1){
1216 TX25Status* status = card->flags;
1217 S508_S514_lock(card, &smp_flags);
1218 x25_set_intr_mode(card, INTR_ON_TIMER);
1219 status->imask &= ~INTR_ON_TIMER;
1220 S508_S514_unlock(card, &smp_flags);
1221 }
1222 }else{
1223 /* X25 can have multiple interfaces thus, start the
1224 * protocol once all interfaces are up */
1225
1226 //FIXME: There is a bug here. If interface is
1227 //brought down and up, it will try to enable comm.
1228 if (card->open_cnt == card->u.x.num_of_ch){
1229
1230 S508_S514_lock(card, &smp_flags);
1231 connect(card);
1232 S508_S514_unlock(card, &smp_flags);
1233
1234 mod_timer(&card->u.x.x25_timer, jiffies + HZ);
1235 }
1236 }
1237	/* Device is not up until we are in the connected state */
1238 do_gettimeofday( &tv );
1239 chan->router_start_time = tv.tv_sec;
1240
1241 netif_start_queue(dev);
1242
1243 return 0;
1244}
1245
1246/*===================================================================
1247 * Name:	if_close(), Close/Bring down the Network Interface
1248 *
1249 * Purpose: To bring down a network interface.
1250 *
1251 * Rationale:
1252 *
1253 * Description: Close network interface.
1254 *			o decrement module use count
1255 *
1256 * Called by: Kernel (/usr/src/linux/net/core/dev.c)
1257 * (dev->close())
1258 * ifconfig <name> down: will trigger the kernel
1259 * which will call this function.
1260 *
1261 * Assumptions: None
1262 *
1263 * Warnings: None
1264 *
1265 * Return: 0 Ok
1266 * <0 Failure: Interface will not exit properly.
1267 */
1268static int if_close(struct net_device* dev)
1269{
1270 x25_channel_t* chan = dev->priv;
1271 sdla_t* card = chan->card;
1272 unsigned long smp_flags;
1273
1274 netif_stop_queue(dev);
1275
1276 if ((chan->common.state == WAN_CONNECTED) ||
1277 (chan->common.state == WAN_CONNECTING)){
1278 S508_S514_lock(card, &smp_flags);
1279 chan_disc(dev);
1280 S508_S514_unlock(card, &smp_flags);
1281 }
1282
1283 wanpipe_close(card);
1284
1285 S508_S514_lock(card, &smp_flags);
1286 if (chan->bh_head){
1287 int i;
1288 struct sk_buff *skb;
1289
1290 for (i=0; i<(MAX_BH_BUFF+1); i++){
1291 skb = ((bh_data_t *)&chan->bh_head[i])->skb;
1292 if (skb != NULL){
1293 dev_kfree_skb_any(skb);
1294 }
1295 }
1296 kfree(chan->bh_head);
1297 chan->bh_head=NULL;
1298 }
1299 S508_S514_unlock(card, &smp_flags);
1300
1301 /* If this is the last close, disconnect physical link */
1302 if (!card->open_cnt){
1303 S508_S514_lock(card, &smp_flags);
1304 disconnect(card);
1305 x25_set_intr_mode(card, 0);
1306 S508_S514_unlock(card, &smp_flags);
1307 }
1308
1309 /* Decrement the number of interfaces */
1310 --card->u.x.no_dev;
1311 return 0;
1312}
1313
1314/*======================================================================
1315 * Build media header.
1316 * o encapsulate packet according to encapsulation type.
1317 *
1318 * The trick here is to put packet type (Ethertype) into 'protocol'
1319 * field of the socket buffer, so that we don't forget it.
1320 * If encapsulation fails, set skb->protocol to 0 and discard
1321 * packet later.
1322 *
1323 * Return: media header length.
1324 *======================================================================*/
1325
1326static int if_header(struct sk_buff* skb, struct net_device* dev,
1327 unsigned short type, void* daddr, void* saddr,
1328 unsigned len)
1329{
1330 x25_channel_t* chan = dev->priv;
1331 int hdr_len = dev->hard_header_len;
1332
1333 skb->protocol = htons(type);
1334 if (!chan->protocol){
1335 hdr_len = wanrouter_encapsulate(skb, dev, type);
1336 if (hdr_len < 0){
1337 hdr_len = 0;
1338 skb->protocol = htons(0);
1339 }
1340 }
1341 return hdr_len;
1342}
1343
1344/*===============================================================
1345 * Re-build media header.
1346 *
1347 * Return: 1 physical address resolved.
1348 * 0 physical address not resolved
1349 *==============================================================*/
1350
1351static int if_rebuild_hdr (struct sk_buff* skb)
1352{
1353 struct net_device *dev = skb->dev;
1354 x25_channel_t* chan = dev->priv;
1355 sdla_t* card = chan->card;
1356
1357 printk(KERN_INFO "%s: rebuild_header() called for interface %s!\n",
1358 card->devname, dev->name);
1359 return 1;
1360}
1361
1362
1363/*============================================================================
1364 * Handle transmit timeout event from netif watchdog
1365 */
1366static void if_tx_timeout(struct net_device *dev)
1367{
1368 x25_channel_t* chan = dev->priv;
1369 sdla_t *card = chan->card;
1370
1371 /* If our device stays busy for at least 5 seconds then we will
1372	 * kick start the device by waking up the transmit queue. We expect
1373	 * that our device never stays busy more than 5 seconds, so this
1374 * is only used as a last resort.
1375 */
1376
1377 ++chan->if_send_stat.if_send_tbusy_timeout;
1378 printk (KERN_INFO "%s: Transmit timed out on %s\n",
1379 card->devname, dev->name);
1380 netif_wake_queue (dev);
1381}
1382
1383
1384/*=========================================================================
1385 * Send a packet on a network interface.
1386 * o set tbusy flag (marks start of the transmission).
1387 * o check link state. If link is not up, then drop the packet.
1388 * o check channel status. If it's down then initiate a call.
1389 * o pass a packet to corresponding WAN device.
1390 * o free socket buffer
1391 *
1392 * Return: 0 complete (socket buffer must be freed)
1393 * non-0 packet may be re-transmitted (tbusy must be set)
1394 *
1395 * Notes:
1396 * 1. This routine is called either by the protocol stack or by the "net
1397 * bottom half" (with interrupts enabled).
1398 * 2. Setting tbusy flag will inhibit further transmit requests from the
1399 * protocol stack and can be used for flow control with protocol layer.
1400 *
1401 *========================================================================*/
1402
1403static int if_send(struct sk_buff* skb, struct net_device* dev)
1404{
1405 x25_channel_t* chan = dev->priv;
1406 sdla_t* card = chan->card;
1407 TX25Status* status = card->flags;
1408 int udp_type;
1409 unsigned long smp_flags=0;
1410
1411 ++chan->if_send_stat.if_send_entry;
1412
1413 netif_stop_queue(dev);
1414
1415 /* No need to check frame length, since socket code
1416 * will perform the check for us */
1417
1418 chan->tick_counter = jiffies;
1419
1420 /* Critical region starts here */
1421 S508_S514_lock(card, &smp_flags);
1422
1423 if (test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)){
1424 printk(KERN_INFO "Hit critical in if_send()! %lx\n",card->wandev.critical);
1425 goto if_send_crit_exit;
1426 }
1427
1428 udp_type = udp_pkt_type(skb, card);
1429
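	/* XPIPEMON management packets are not transmitted here; they are
	 * queued and processed later from the timer interrupt routine. */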
1430 if(udp_type != UDP_INVALID_TYPE) {
1431
1432 if(store_udp_mgmt_pkt(udp_type, UDP_PKT_FRM_STACK, card, dev, skb,
1433 chan->common.lcn)) {
1434
1435 status->imask |= INTR_ON_TIMER;
1436 if (udp_type == UDP_XPIPE_TYPE){
1437 chan->if_send_stat.if_send_PIPE_request++;
1438 }
1439 }
1440 netif_start_queue(dev);
1441 clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
1442 S508_S514_unlock(card, &smp_flags);
1443 return 0;
1444 }
1445
1446 if (chan->transmit_length){
1447 //FIXME: This check doesn't make sense any more
1448 if (chan->common.state != WAN_CONNECTED){
1449 chan->transmit_length=0;
1450 atomic_set(&chan->common.driver_busy,0);
1451 }else{
1452 netif_stop_queue(dev);
1453 ++card->u.x.tx_interrupts_pending;
1454 status->imask |= INTR_ON_TX_FRAME;
1455 clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
1456 S508_S514_unlock(card, &smp_flags);
1457 return 1;
1458 }
1459 }
1460
1461 if (card->wandev.state != WAN_CONNECTED){
1462 ++chan->ifstats.tx_dropped;
1463 ++card->wandev.stats.tx_dropped;
1464 ++chan->if_send_stat.if_send_wan_disconnected;
1465
1466 }else if ( chan->protocol && (chan->protocol != skb->protocol)){
1467 printk(KERN_INFO
1468 "%s: unsupported Ethertype 0x%04X on interface %s!\n",
1469 chan->name, htons(skb->protocol), dev->name);
1470
1471		printk(KERN_INFO "PROTO %X\n", htons(chan->protocol));
1472 ++chan->ifstats.tx_errors;
1473 ++chan->ifstats.tx_dropped;
1474 ++card->wandev.stats.tx_dropped;
1475 ++chan->if_send_stat.if_send_protocol_error;
1476
1477 }else switch (chan->common.state){
1478
1479 case WAN_DISCONNECTED:
1480		/* Try to establish connection. If succeeded, then start
1481 * transmission, else drop a packet.
1482 */
1483 if (chan->common.usedby == API){
1484 ++chan->ifstats.tx_dropped;
1485 ++card->wandev.stats.tx_dropped;
1486 break;
1487 }else{
1488 if (chan_connect(dev) != 0){
1489 ++chan->ifstats.tx_dropped;
1490 ++card->wandev.stats.tx_dropped;
1491 break;
1492 }
1493 }
1494 /* fall through */
1495
1496 case WAN_CONNECTED:
1497 if( skb->protocol == htons(ETH_P_IPX)) {
1498 if(chan->enable_IPX) {
1499 switch_net_numbers( skb->data,
1500 chan->network_number, 0);
1501 } else {
1502 ++card->wandev.stats.tx_dropped;
1503 ++chan->ifstats.tx_dropped;
1504 ++chan->if_send_stat.if_send_protocol_error;
1505 goto if_send_crit_exit;
1506 }
1507 }
1508			/* We never drop here; if we cannot send, then copy
1509			 * the packet into a transmit buffer
1510 */
1511 chan_send(dev, skb->data, skb->len, 0);
1512 break;
1513
1514 default:
1515 ++chan->ifstats.tx_dropped;
1516 ++card->wandev.stats.tx_dropped;
1517 break;
1518 }
1519
1520
1521if_send_crit_exit:
1522
1523 dev_kfree_skb_any(skb);
1524
1525 netif_start_queue(dev);
1526 clear_bit(SEND_CRIT,(void*)&card->wandev.critical);
1527 S508_S514_unlock(card, &smp_flags);
1528 return 0;
1529}
1530
1531/*============================================================================
1532 * Setup so that a frame can be transmitted on the occurrence of a transmit
1533 * interrupt.
1534 *===========================================================================*/
1535
1536static void setup_for_delayed_transmit(struct net_device* dev, void* buf,
1537 unsigned len)
1538{
1539 x25_channel_t* chan = dev->priv;
1540 sdla_t* card = chan->card;
1541 TX25Status* status = card->flags;
1542
1543 ++chan->if_send_stat.if_send_adptr_bfrs_full;
1544
1545 if(chan->transmit_length) {
1546 printk(KERN_INFO "%s: Error, transmit length set in delayed transmit!\n",
1547 card->devname);
1548 return;
1549 }
1550
1551 if (chan->common.usedby == API){
1552 if (len > X25_CHAN_MTU+sizeof(x25api_hdr_t)) {
1553 ++chan->ifstats.tx_dropped;
1554 ++card->wandev.stats.tx_dropped;
1555 printk(KERN_INFO "%s: Length is too big for delayed transmit\n",
1556 card->devname);
1557 return;
1558 }
1559 }else{
1560 if (len > X25_MAX_DATA) {
1561 ++chan->ifstats.tx_dropped;
1562 ++card->wandev.stats.tx_dropped;
1563 printk(KERN_INFO "%s: Length is too big for delayed transmit\n",
1564 card->devname);
1565 return;
1566 }
1567 }
1568
1569 chan->transmit_length = len;
1570 atomic_set(&chan->common.driver_busy,1);
1571 memcpy(chan->transmit_buffer, buf, len);
1572
1573 ++chan->if_send_stat.if_send_tx_int_enabled;
1574
1575 /* Enable Transmit Interrupt */
1576 ++card->u.x.tx_interrupts_pending;
1577 status->imask |= INTR_ON_TX_FRAME;
1578}
1579
1580
1581/*===============================================================
1582 * net_device_stats
1583 *
1584 * Get ethernet-style interface statistics.
1585 * Return a pointer to struct enet_statistics.
1586 *
1587 *==============================================================*/
1588static struct net_device_stats *if_stats(struct net_device* dev)
1589{
1590 x25_channel_t *chan = dev->priv;
1591
1592 if(chan == NULL)
1593 return NULL;
1594
1595 return &chan->ifstats;
1596}
1597
1598
1599/*
1600 * Interrupt Handlers
1601 */
1602
1603/*
1604 * X.25 Interrupt Service Routine.
1605 */
1606
1607static void wpx_isr (sdla_t* card)
1608{
1609 TX25Status* status = card->flags;
1610
1611 card->in_isr = 1;
1612 ++card->statistics.isr_entry;
1613
1614 if (test_bit(PERI_CRIT,(void*)&card->wandev.critical)){
1615 card->in_isr=0;
1616 status->iflags = 0;
1617 return;
1618 }
1619
1620 if (test_bit(SEND_CRIT, (void*)&card->wandev.critical)){
1621
1622 printk(KERN_INFO "%s: wpx_isr: wandev.critical set to 0x%02lx, int type = 0x%02x\n",
1623 card->devname, card->wandev.critical, status->iflags);
1624 card->in_isr = 0;
1625 status->iflags = 0;
1626 return;
1627 }
1628
1629 /* For all interrupts set the critical flag to CRITICAL_RX_INTR.
1630 * If the if_send routine is called with this flag set it will set
1631 * the enable transmit flag to 1. (for a delayed interrupt)
1632 */
1633 switch (status->iflags){
1634
1635 case RX_INTR_PENDING: /* receive interrupt */
1636 rx_intr(card);
1637 break;
1638
1639 case TX_INTR_PENDING: /* transmit interrupt */
1640 tx_intr(card);
1641 break;
1642
1643 case MODEM_INTR_PENDING: /* modem status interrupt */
1644 status_intr(card);
1645 break;
1646
1647 case X25_ASY_TRANS_INTR_PENDING: /* network event interrupt */
1648 event_intr(card);
1649 break;
1650
1651 case TIMER_INTR_PENDING:
1652 timer_intr(card);
1653 break;
1654
1655 default: /* unwanted interrupt */
1656 spur_intr(card);
1657 }
1658
1659 card->in_isr = 0;
1660 status->iflags = 0; /* clear interrupt condition */
1661}
1662
1663/*
1664 * Receive interrupt handler.
1665 * This routine handles fragmented IP packets using M-bit according to the
1666 * RFC1356.
1667 * o map logical channel number to network interface.
1668 * o allocate socket buffer or append received packet to the existing one.
1669 * o if M-bit is reset (i.e. it's the last packet in a sequence) then
1670 * decapsulate packet and pass socket buffer to the protocol stack.
1671 *
1672 * Notes:
1673 * 1. When allocating a socket buffer, if M-bit is set then more data is
1674 * coming and we have to allocate buffer for the maximum IP packet size
1675 * expected on this channel.
1676 * 2. If something goes wrong and X.25 packet has to be dropped (e.g. no
1677 * socket buffers available) the whole packet sequence must be discarded.
1678 */
1679
1680static void rx_intr (sdla_t* card)
1681{
1682 TX25Mbox* rxmb = card->rxmb;
1683 unsigned lcn = rxmb->cmd.lcn;
1684 struct net_device* dev = find_channel(card,lcn);
1685 x25_channel_t* chan;
1686 struct sk_buff* skb=NULL;
1687
1688 if (dev == NULL){
1689 /* Invalid channel, discard packet */
1690 printk(KERN_INFO "%s: receiving on orphaned LCN %d!\n",
1691 card->devname, lcn);
1692 return;
1693 }
1694
1695 chan = dev->priv;
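	/* Record the time of this receive; poll_active() uses it to
	 * clear down idle SVCs after chan->idle_timeout seconds. */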
1696 chan->i_timeout_sofar = jiffies;
1697
1698
1699	/* Copy the data from the board into an
1700 * skb buffer
1701 */
1702 if (wanpipe_pull_data_in_skb(card,dev,&skb)){
1703 ++chan->ifstats.rx_dropped;
1704 ++card->wandev.stats.rx_dropped;
1705 ++chan->rx_intr_stat.rx_intr_no_socket;
1706 ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
1707 return;
1708 }
1709
1710 dev->last_rx = jiffies; /* timestamp */
1711
1712
1713 /* ------------ API ----------------*/
1714
1715 if (chan->common.usedby == API){
1716
1717 if (bh_enqueue(dev, skb)){
1718 ++chan->ifstats.rx_dropped;
1719 ++card->wandev.stats.rx_dropped;
1720 ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
1721 dev_kfree_skb_any(skb);
1722 return;
1723 }
1724
1725 ++chan->ifstats.rx_packets;
1726 chan->ifstats.rx_bytes += skb->len;
1727
1728
1729 chan->rx_skb = NULL;
1730 if (!test_and_set_bit(0, &chan->tq_working)){
1731 wanpipe_queue_work(&chan->common.wanpipe_work);
1732 }
1733 return;
1734 }
1735
1736
1737 /* ------------- WANPIPE -------------------*/
1738
1739 /* set rx_skb to NULL so we won't access it later when kernel already owns it */
1740 chan->rx_skb=NULL;
1741
1742 /* Decapsulate packet, if necessary */
1743 if (!skb->protocol && !wanrouter_type_trans(skb, dev)){
1744 /* can't decapsulate packet */
1745 dev_kfree_skb_any(skb);
1746 ++chan->ifstats.rx_errors;
1747 ++chan->ifstats.rx_dropped;
1748 ++card->wandev.stats.rx_dropped;
1749 ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
1750
1751 }else{
1752 if( handle_IPXWAN(skb->data, chan->name,
1753 chan->enable_IPX, chan->network_number,
1754 skb->protocol)){
1755
1756 if( chan->enable_IPX ){
1757 if(chan_send(dev, skb->data, skb->len,0)){
1758 chan->tx_skb = skb;
1759 }else{
1760 dev_kfree_skb_any(skb);
1761 ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
1762 }
1763 }else{
1764 /* increment IPX packet dropped statistic */
1765 ++chan->ifstats.rx_dropped;
1766 ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
1767 }
1768 }else{
1769 skb->mac.raw = skb->data;
1770 chan->ifstats.rx_bytes += skb->len;
1771 ++chan->ifstats.rx_packets;
1772 ++chan->rx_intr_stat.rx_intr_bfr_passed_to_stack;
1773 netif_rx(skb);
1774 }
1775 }
1776
1777 return;
1778}
1779
1780
1781static int wanpipe_pull_data_in_skb(sdla_t *card, struct net_device *dev,
1782 struct sk_buff **skb)
1783{
1784 void *bufptr;
1785 TX25Mbox* rxmb = card->rxmb;
1786 unsigned len = rxmb->cmd.length; /* packet length */
1787 unsigned qdm = rxmb->cmd.qdm; /* Q,D and M bits */
1788 x25_channel_t *chan = dev->priv;
1789 struct sk_buff *new_skb = *skb;
1790
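	/* In WANPIPE mode packets may arrive fragmented (M bit set in qdm).
	 * While drop_sequence is set, the remainder of a failed sequence is
	 * discarded; the fragment with the M bit clear ends the sequence. */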
1791 if (chan->common.usedby == WANPIPE){
1792 if (chan->drop_sequence){
1793 if (!(qdm & 0x01)){
1794 chan->drop_sequence = 0;
1795 }
1796 return 1;
1797 }
1798 new_skb = chan->rx_skb;
1799 }else{
1800 /* Add on the API header to the received
1801 * data
1802 */
1803 len += sizeof(x25api_hdr_t);
1804 }
1805
1806 if (new_skb == NULL){
1807 int bufsize;
1808
1809 if (chan->common.usedby == WANPIPE){
1810 bufsize = (qdm & 0x01) ? dev->mtu : len;
1811 }else{
1812 bufsize = len;
1813 }
1814
1815 /* Allocate new socket buffer */
1816 new_skb = dev_alloc_skb(bufsize + dev->hard_header_len);
1817 if (new_skb == NULL){
1818 printk(KERN_INFO "%s: no socket buffers available!\n",
1819 card->devname);
1820 chan->drop_sequence = 1; /* set flag */
1821 ++chan->ifstats.rx_dropped;
1822 return 1;
1823 }
1824 }
1825
1826 if (skb_tailroom(new_skb) < len){
1827 /* No room for the packet. Call off the whole thing! */
1828 dev_kfree_skb_any(new_skb);
1829 if (chan->common.usedby == WANPIPE){
1830 chan->rx_skb = NULL;
1831 if (qdm & 0x01){
1832 chan->drop_sequence = 1;
1833 }
1834 }
1835
1836 printk(KERN_INFO "%s: unexpectedly long packet sequence "
1837 "on interface %s!\n", card->devname, dev->name);
1838 ++chan->ifstats.rx_length_errors;
1839 return 1;
1840 }
1841
1842 bufptr = skb_put(new_skb,len);
1843
1844
1845 if (chan->common.usedby == API){
1846 /* Fill in the x25api header
1847 */
1848 x25api_t * api_data = (x25api_t*)bufptr;
1849 api_data->hdr.qdm = rxmb->cmd.qdm;
1850 api_data->hdr.cause = rxmb->cmd.cause;
1851 api_data->hdr.diagn = rxmb->cmd.diagn;
1852 api_data->hdr.length = rxmb->cmd.length;
1853 memcpy(api_data->data, rxmb->data, rxmb->cmd.length);
1854 }else{
1855 memcpy(bufptr, rxmb->data, len);
1856 }
1857
1858 new_skb->dev = dev;
1859
1860 if (chan->common.usedby == API){
1861 new_skb->mac.raw = new_skb->data;
1862 new_skb->protocol = htons(X25_PROT);
1863 new_skb->pkt_type = WAN_PACKET_DATA;
1864 }else{
1865 new_skb->protocol = chan->protocol;
1866 chan->rx_skb = new_skb;
1867 }
1868
1869	/* If the M bit (in qdm) is set, more data is coming;
1870	 * thus, exit and wait for more data before
1871	 * sending the packet up. (Used by router only)
1872 */
1873 if ((qdm & 0x01) && (chan->common.usedby == WANPIPE))
1874 return 1;
1875
1876 *skb = new_skb;
1877
1878 return 0;
1879}
1880
1881/*===============================================================
1882 * tx_intr
1883 *
1884 * Transmit interrupt handler.
1885 * For each dev, check that there is something to send.
1886 * If data available, transmit.
1887 *
1888 *===============================================================*/
1889
1890static void tx_intr (sdla_t* card)
1891{
1892 struct net_device *dev;
1893 TX25Status* status = card->flags;
1894 unsigned char more_to_tx=0;
1895 x25_channel_t *chan=NULL;
1896 int i=0;
1897
1898 if (card->u.x.tx_dev == NULL){
1899 card->u.x.tx_dev = card->wandev.dev;
1900 }
1901
1902 dev = card->u.x.tx_dev;
1903
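	/* Round-robin over all interfaces, starting from where the
	 * previous transmit interrupt left off (card->u.x.tx_dev). */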
1904 for (;;){
1905
1906 chan = dev->priv;
1907 if (chan->transmit_length){
1908 /* Device was set to transmit, check if the TX
1909 * buffers are available
1910 */
1911 if (chan->common.state != WAN_CONNECTED){
1912 chan->transmit_length = 0;
1913 atomic_set(&chan->common.driver_busy,0);
1914 chan->tx_offset=0;
1915 if (netif_queue_stopped(dev)){
1916 if (chan->common.usedby == API){
1917 netif_start_queue(dev);
1918 wakeup_sk_bh(dev);
1919 }else{
1920 netif_wake_queue(dev);
1921 }
1922 }
1923 dev = move_dev_to_next(card,dev);
1924 break;
1925 }
1926
1927 if ((status->cflags[chan->ch_idx] & 0x40 || card->u.x.LAPB_hdlc) &&
1928 (*card->u.x.hdlc_buf_status & 0x40) ){
1929 /* Tx buffer available, we can send */
1930
1931 if (tx_intr_send(card, dev)){
1932 more_to_tx=1;
1933 }
1934
1935 /* If more than one interface present, move the
1936 * device pointer to the next interface, so on the
1937 * next TX interrupt we will try sending from it.
1938 */
1939 dev = move_dev_to_next(card,dev);
1940 break;
1941 }else{
1942 /* Tx buffers not available, but device set
1943 * the TX interrupt. Set more_to_tx and try
1944 * to transmit for other devices.
1945 */
1946 more_to_tx=1;
1947 dev = move_dev_to_next(card,dev);
1948 }
1949
1950 }else{
1951 /* This device was not set to transmit,
1952 * go to next
1953 */
1954 dev = move_dev_to_next(card,dev);
1955 }
1956
1957 if (++i == card->u.x.no_dev){
1958 if (!more_to_tx){
1959 DBG_PRINTK(KERN_INFO "%s: Nothing to Send in TX INTR\n",
1960 card->devname);
1961 }
1962 break;
1963 }
1964
1965 } //End of FOR
1966
1967 card->u.x.tx_dev = dev;
1968
1969 if (!more_to_tx){
1970 /* if any other interfaces have transmit interrupts pending, */
1971 /* do not disable the global transmit interrupt */
1972 if (!(--card->u.x.tx_interrupts_pending)){
1973 status->imask &= ~INTR_ON_TX_FRAME;
1974 }
1975 }
1976 return;
1977}
1978
1979/*===============================================================
1980 * move_dev_to_next
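 *	Advance to the next interface in the card's device chain,
 *	wrapping back to the first device at the end of the list.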
1981 *
1982 *
1983 *===============================================================*/
1984
1985
1986struct net_device *move_dev_to_next(sdla_t *card, struct net_device *dev)
1987{
1988 if (card->u.x.no_dev != 1){
1989 if (!*((struct net_device **)dev->priv))
1990 return card->wandev.dev;
1991 else
1992 return *((struct net_device **)dev->priv);
1993 }
1994 return dev;
1995}
1996
1997/*===============================================================
1998 * tx_intr_send
1999 *
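 *	Send the frame buffered for delayed transmission on this
 *	interface; once fully sent, clear the buffer and wake the queue.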
2000 *
2001 *===============================================================*/
2002
2003static int tx_intr_send(sdla_t *card, struct net_device *dev)
2004{
2005 x25_channel_t* chan = dev->priv;
2006
2007 if (chan_send (dev,chan->transmit_buffer,chan->transmit_length,1)){
2008
2009 /* Packet was split up due to its size, do not disable
2010 * tx_intr
2011 */
2012 return 1;
2013 }
2014
2015 chan->transmit_length=0;
2016 atomic_set(&chan->common.driver_busy,0);
2017 chan->tx_offset=0;
2018
2019 /* If we are in API mode, wakeup the
2020 * sock BH handler, not the NET_BH */
2021 if (netif_queue_stopped(dev)){
2022 if (chan->common.usedby == API){
2023 netif_start_queue(dev);
2024 wakeup_sk_bh(dev);
2025 }else{
2026 netif_wake_queue(dev);
2027 }
2028 }
2029 return 0;
2030}
2031
2032
2033/*===============================================================
2034 * timer_intr
2035 *
2036 * Timer interrupt handler.
2037 * Check who called the timer interrupt and perform
2038 * action accordingly.
2039 *
2040 *===============================================================*/
2041
2042static void timer_intr (sdla_t *card)
2043{
2044 TX25Status* status = card->flags;
2045
2046 if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_CMD_EXEC){
2047
2048 if (timer_intr_cmd_exec(card) == 0){
2049 card->u.x.timer_int_enabled &=
2050 ~TMR_INT_ENABLED_CMD_EXEC;
2051 }
2052
2053 }else if(card->u.x.timer_int_enabled & TMR_INT_ENABLED_UDP_PKT) {
2054
2055 if ((*card->u.x.hdlc_buf_status & 0x40) &&
2056 card->u.x.udp_type == UDP_XPIPE_TYPE){
2057
2058 if(process_udp_mgmt_pkt(card)) {
2059 card->u.x.timer_int_enabled &=
2060 ~TMR_INT_ENABLED_UDP_PKT;
2061 }
2062 }
2063
2064 }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_POLL_ACTIVE) {
2065
2066 struct net_device *dev = card->u.x.poll_device;
2067 x25_channel_t *chan = NULL;
2068
2069 if (!dev){
2070 card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_ACTIVE;
2071 return;
2072 }
2073 chan = dev->priv;
2074
2075 printk(KERN_INFO
2076 "%s: Closing down Idle link %s on LCN %d\n",
2077 card->devname,chan->name,chan->common.lcn);
2078 chan->i_timeout_sofar = jiffies;
2079 chan_disc(dev);
2080 card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_ACTIVE;
2081 card->u.x.poll_device=NULL;
2082
2083 }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_POLL_CONNECT_ON) {
2084
2085 wanpipe_set_state(card, WAN_CONNECTED);
2086 if (card->u.x.LAPB_hdlc){
2087 struct net_device *dev = card->wandev.dev;
2088 set_chan_state(dev,WAN_CONNECTED);
2089 send_delayed_cmd_result(card,dev,card->mbox);
2090 }
2091
2092 /* 0x8F enable all interrupts */
2093 x25_set_intr_mode(card, INTR_ON_RX_FRAME|
2094 INTR_ON_TX_FRAME|
2095 INTR_ON_MODEM_STATUS_CHANGE|
2096 //INTR_ON_COMMAND_COMPLETE|
2097 X25_ASY_TRANS_INTR_PENDING |
2098 INTR_ON_TIMER |
2099 DIRECT_RX_INTR_USAGE
2100 );
2101
2102 status->imask &= ~INTR_ON_TX_FRAME; /* mask Tx interrupts */
2103 card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_CONNECT_ON;
2104
2105 }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_POLL_CONNECT_OFF) {
2106
2107 //printk(KERN_INFO "Poll connect, Turning OFF\n");
2108 disconnect(card);
2109 card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_CONNECT_OFF;
2110
2111 }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_POLL_DISCONNECT) {
2112
2113 //printk(KERN_INFO "POll disconnect, trying to connect\n");
2114 connect(card);
2115 card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_POLL_DISCONNECT;
2116
2117 }else if (card->u.x.timer_int_enabled & TMR_INT_ENABLED_UPDATE){
2118
2119 if (*card->u.x.hdlc_buf_status & 0x40){
2120 x25_get_err_stats(card);
2121 x25_get_stats(card);
2122 card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE;
2123 }
2124 }
2125
2126 if(!card->u.x.timer_int_enabled){
2127 //printk(KERN_INFO "Turning Timer Off \n");
2128 status->imask &= ~INTR_ON_TIMER;
2129 }
2130}
2131
2132/*====================================================================
2133 * Modem status interrupt handler.
2134 *===================================================================*/
2135static void status_intr (sdla_t* card)
2136{
2137
2138 /* Added to avoid Modem status message flooding */
2139 static TX25ModemStatus last_stat;
2140
2141 TX25Mbox* mbox = card->mbox;
2142 TX25ModemStatus *modem_status;
2143 struct net_device *dev;
2144 x25_channel_t *chan;
2145 int err;
2146
2147 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2148 mbox->cmd.command = X25_READ_MODEM_STATUS;
2149 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2150 if (err){
2151 x25_error(card, err, X25_READ_MODEM_STATUS, 0);
2152 }else{
2153
2154 modem_status = (TX25ModemStatus*)mbox->data;
2155
2156 /* Check if the last status was the same
2157 * if it was, do NOT print message again */
2158
2159 if (last_stat.status != modem_status->status){
2160
2161 printk(KERN_INFO "%s: Modem Status Change: DCD=%s, CTS=%s\n",
2162 card->devname,DCD(modem_status->status),CTS(modem_status->status));
2163
2164 last_stat.status = modem_status->status;
2165
2166 if (card->u.x.oob_on_modem){
2167
2168 mbox->cmd.pktType = mbox->cmd.command;
2169 mbox->cmd.result = 0x08;
2170
2171 /* Send a OOB to all connected sockets */
2172 for (dev = card->wandev.dev; dev;
2173 dev = *((struct net_device**)dev->priv)) {
2174 chan=dev->priv;
2175 if (chan->common.usedby == API){
2176 send_oob_msg(card,dev,mbox);
2177 }
2178 }
2179
2180 /* The modem OOB message will probably kill the
2181				 * link.  If we don't clear the flag here,
2182 * a deadlock could occur */
2183 if (atomic_read(&card->u.x.command_busy)){
2184 atomic_set(&card->u.x.command_busy,0);
2185 }
2186 }
2187 }
2188 }
2189
2190 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2191 mbox->cmd.command = X25_HDLC_LINK_STATUS;
2192 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2193 if (err){
2194 x25_error(card, err, X25_HDLC_LINK_STATUS, 0);
2195 }
2196
2197}
2198
2199/*====================================================================
2200 * Network event interrupt handler.
2201 *===================================================================*/
2202static void event_intr (sdla_t* card)
2203{
2204 x25_fetch_events(card);
2205}
2206
2207/*====================================================================
2208 * Spurious interrupt handler.
2209 * o print a warning
2211 *====================================================================*/
2212
2213static void spur_intr (sdla_t* card)
2214{
2215 printk(KERN_INFO "%s: spurious interrupt!\n", card->devname);
2216}
2217
2218
2219/*
2220 * Background Polling Routines
2221 */
2222
2223/*====================================================================
2224 * Main polling routine.
2225 * This routine is repeatedly called by the WANPIPE 'thread' to allow for
2226 * time-dependent housekeeping work.
2227 *
2228 * Notes:
2229 *	1. This routine may be called in interrupt context with all interrupts
2230 * enabled. Beware!
2231 *====================================================================*/
2232
2233static void wpx_poll (sdla_t *card)
2234{
2235 if (!card->wandev.dev){
2236 goto wpx_poll_exit;
2237 }
2238
2239 if (card->open_cnt != card->u.x.num_of_ch){
2240 goto wpx_poll_exit;
2241 }
2242
2243 if (test_bit(PERI_CRIT,&card->wandev.critical)){
2244 goto wpx_poll_exit;
2245 }
2246
2247 if (test_bit(SEND_CRIT,&card->wandev.critical)){
2248 goto wpx_poll_exit;
2249 }
2250
2251 switch(card->wandev.state){
2252 case WAN_CONNECTED:
2253 poll_active(card);
2254 break;
2255
2256 case WAN_CONNECTING:
2257 poll_connecting(card);
2258 break;
2259
2260 case WAN_DISCONNECTED:
2261 poll_disconnected(card);
2262 break;
2263 }
2264
2265wpx_poll_exit:
2266 clear_bit(POLL_CRIT,&card->wandev.critical);
2267 return;
2268}
2269
2270static void trigger_x25_poll(sdla_t *card)
2271{
2272 schedule_work(&card->u.x.x25_poll_work);
2273}
2274
2275/*====================================================================
2276 * Handle physical link establishment phase.
2277 * o if connection timed out, disconnect the link.
2278 *===================================================================*/
2279
2280static void poll_connecting (sdla_t* card)
2281{
2282 volatile TX25Status* status = card->flags;
2283
2284 if (status->gflags & X25_HDLC_ABM){
2285
2286 timer_intr_exec (card, TMR_INT_ENABLED_POLL_CONNECT_ON);
2287
2288 }else if ((jiffies - card->state_tick) > CONNECT_TIMEOUT){
2289
2290 timer_intr_exec (card, TMR_INT_ENABLED_POLL_CONNECT_OFF);
2291
2292 }
2293}
2294
2295/*====================================================================
2296 * Handle physical link disconnected phase.
2297 * o if hold-down timeout has expired and there are open interfaces,
2298 * connect link.
2299 *===================================================================*/
2300
2301static void poll_disconnected (sdla_t* card)
2302{
2303 struct net_device *dev;
2304 x25_channel_t *chan;
2305 TX25Status* status = card->flags;
2306
2307 if (!card->u.x.LAPB_hdlc && card->open_cnt &&
2308 ((jiffies - card->state_tick) > HOLD_DOWN_TIME)){
2309 timer_intr_exec(card, TMR_INT_ENABLED_POLL_DISCONNECT);
2310 }
2311
2312
2313 if ((dev=card->wandev.dev) == NULL)
2314 return;
2315
2316 if ((chan=dev->priv) == NULL)
2317 return;
2318
2319 if (chan->common.usedby == API &&
2320 atomic_read(&chan->common.command) &&
2321 card->u.x.LAPB_hdlc){
2322
2323 if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_CMD_EXEC))
2324 card->u.x.timer_int_enabled |= TMR_INT_ENABLED_CMD_EXEC;
2325
2326 if (!(status->imask & INTR_ON_TIMER))
2327 status->imask |= INTR_ON_TIMER;
2328 }
2329
2330}
2331
2332/*====================================================================
2333 * Handle active link phase.
2334 * o fetch X.25 asynchronous events.
2335 * o kick off transmission on all interfaces.
2336 *===================================================================*/
2337
2338static void poll_active (sdla_t* card)
2339{
2340 struct net_device* dev;
2341 TX25Status* status = card->flags;
2342
2343 for (dev = card->wandev.dev; dev;
2344 dev = *((struct net_device **)dev->priv)){
2345 x25_channel_t* chan = dev->priv;
2346
2347 /* If SVC has been idle long enough, close virtual circuit */
2348 if ( chan->common.svc &&
2349 chan->common.state == WAN_CONNECTED &&
2350 chan->common.usedby == WANPIPE ){
2351
2352 if( (jiffies - chan->i_timeout_sofar) / HZ > chan->idle_timeout ){
2353 /* Close svc */
2354 card->u.x.poll_device=dev;
2355 timer_intr_exec (card, TMR_INT_ENABLED_POLL_ACTIVE);
2356 }
2357 }
2358
2359#ifdef PRINT_DEBUG
2360 chan->ifstats.tx_compressed = atomic_read(&chan->common.command);
2361 chan->ifstats.tx_errors = chan->common.state;
2362 chan->ifstats.rx_fifo_errors = atomic_read(&card->u.x.command_busy);
2363 ++chan->ifstats.tx_bytes;
2364
2365 chan->ifstats.rx_fifo_errors=atomic_read(&chan->common.disconnect);
2366 chan->ifstats.multicast=atomic_read(&chan->bh_buff_used);
2367 chan->ifstats.rx_length_errors=*card->u.x.hdlc_buf_status;
2368#endif
2369
2370 if (chan->common.usedby == API &&
2371 atomic_read(&chan->common.command) &&
2372 !card->u.x.LAPB_hdlc){
2373
2374 if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_CMD_EXEC))
2375 card->u.x.timer_int_enabled |= TMR_INT_ENABLED_CMD_EXEC;
2376
2377 if (!(status->imask & INTR_ON_TIMER))
2378 status->imask |= INTR_ON_TIMER;
2379 }
2380
2381 if ((chan->common.usedby == API) &&
2382 atomic_read(&chan->common.disconnect)){
2383
2384 if (chan->common.state == WAN_DISCONNECTED){
2385 atomic_set(&chan->common.disconnect,0);
2386 return;
2387 }
2388
2389 atomic_set(&chan->common.command,X25_CLEAR_CALL);
2390 if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_CMD_EXEC))
2391 card->u.x.timer_int_enabled |= TMR_INT_ENABLED_CMD_EXEC;
2392
2393 if (!(status->imask & INTR_ON_TIMER))
2394 status->imask |= INTR_ON_TIMER;
2395 }
2396 }
2397}
2398
2399static void timer_intr_exec(sdla_t *card, unsigned char TYPE)
2400{
2401 TX25Status* status = card->flags;
2402 card->u.x.timer_int_enabled |= TYPE;
2403 if (!(status->imask & INTR_ON_TIMER))
2404 status->imask |= INTR_ON_TIMER;
2405}
2406
2407
2408/*====================================================================
2409 * SDLA Firmware-Specific Functions
2410 *
2411 * Almost all X.25 commands can unexpectedly fail due to so-called 'X.25
2412 * asynchronous events' such as restart, interrupt, incoming call request,
2413 * call clear request, etc. They can't be ignored and have to be dealt with
2414 * immediately. To tackle this problem we execute each interface
2415 * command in a loop until a good return code is received or the maximum
2416 * number of retries is reached. Whenever an interface command returns a
2417 * non-zero return code, the asynchronous event/error handler x25_error() is called.
2418 *====================================================================*/
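/*====================================================================
 * Illustrative sketch only (not an additional helper in this driver):
 * each of the firmware commands below follows roughly this pattern,
 * with <COMMAND> standing in for the command code being issued:
 *
 *	int retry = MAX_CMD_RETRY;
 *	int err;
 *
 *	do {
 *		memset(&mbox->cmd, 0, sizeof(TX25Cmd));
 *		mbox->cmd.command = <COMMAND>;
 *		err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
 *	} while (err && retry-- && x25_error(card, err, <COMMAND>, lcn));
 *
 * x25_error() deals with any asynchronous event behind the failure and
 * returns non-zero if the command should be retried, zero to abort it.
 *====================================================================*/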
2419
2420/*====================================================================
2421 * Read X.25 firmware version.
2422 * Put code version as ASCII string in str.
2423 *===================================================================*/
2424
2425static int x25_get_version (sdla_t* card, char* str)
2426{
2427 TX25Mbox* mbox = card->mbox;
2428 int retry = MAX_CMD_RETRY;
2429 int err;
2430
2431 do
2432 {
2433 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2434 mbox->cmd.command = X25_READ_CODE_VERSION;
2435 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2436 } while (err && retry-- &&
2437 x25_error(card, err, X25_READ_CODE_VERSION, 0));
2438
2439 if (!err && str)
2440 {
2441 int len = mbox->cmd.length;
2442
2443 memcpy(str, mbox->data, len);
2444 str[len] = '\0';
2445 }
2446 return err;
2447}
2448
2449/*====================================================================
2450 * Configure adapter.
2451 *===================================================================*/
2452
2453static int x25_configure (sdla_t* card, TX25Config* conf)
2454{
2455 TX25Mbox* mbox = card->mbox;
2456 int retry = MAX_CMD_RETRY;
2457 int err;
2458
2459 do{
2460 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2461 memcpy(mbox->data, (void*)conf, sizeof(TX25Config));
2462 mbox->cmd.length = sizeof(TX25Config);
2463 mbox->cmd.command = X25_SET_CONFIGURATION;
2464 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2465 } while (err && retry-- && x25_error(card, err, X25_SET_CONFIGURATION, 0));
2466 return err;
2467}
2468
2469/*====================================================================
2470 * Configure adapter for HDLC only.
2471 *===================================================================*/
2472
2473static int hdlc_configure (sdla_t* card, TX25Config* conf)
2474{
2475 TX25Mbox* mbox = card->mbox;
2476 int retry = MAX_CMD_RETRY;
2477 int err;
2478
2479 do{
2480 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2481 memcpy(mbox->data, (void*)conf, sizeof(TX25Config));
2482 mbox->cmd.length = sizeof(TX25Config);
2483 mbox->cmd.command = X25_HDLC_SET_CONFIG;
2484 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2485 } while (err && retry-- && x25_error(card, err, X25_SET_CONFIGURATION, 0));
2486
2487 return err;
2488}
2489
2490static int set_hdlc_level (sdla_t* card)
2491{
2492
2493 TX25Mbox* mbox = card->mbox;
2494 int retry = MAX_CMD_RETRY;
2495 int err;
2496
2497 do{
2498 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2499 mbox->cmd.command = SET_PROTOCOL_LEVEL;
2500 mbox->cmd.length = 1;
2501 mbox->data[0] = HDLC_LEVEL; //| DO_HDLC_LEVEL_ERROR_CHECKING;
2502 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2503 } while (err && retry-- && x25_error(card, err, SET_PROTOCOL_LEVEL, 0));
2504
2505 return err;
2506}
2507
2508
2509
2510/*====================================================================
2511 * Get communications error statistics.
2512 *====================================================================*/
2513
2514static int x25_get_err_stats (sdla_t* card)
2515{
2516 TX25Mbox* mbox = card->mbox;
2517 int retry = MAX_CMD_RETRY;
2518 int err;
2519
2520 do
2521 {
2522 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2523 mbox->cmd.command = X25_HDLC_READ_COMM_ERR;
2524 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2525 } while (err && retry-- && x25_error(card, err, X25_HDLC_READ_COMM_ERR, 0));
2526
2527 if (!err)
2528 {
2529 THdlcCommErr* stats = (void*)mbox->data;
2530
2531 card->wandev.stats.rx_over_errors = stats->rxOverrun;
2532 card->wandev.stats.rx_crc_errors = stats->rxBadCrc;
2533 card->wandev.stats.rx_missed_errors = stats->rxAborted;
2534 card->wandev.stats.tx_aborted_errors = stats->txAborted;
2535 }
2536 return err;
2537}
2538
2539/*====================================================================
2540 * Get protocol statistics.
2541 *===================================================================*/
2542
2543static int x25_get_stats (sdla_t* card)
2544{
2545 TX25Mbox* mbox = card->mbox;
2546 int retry = MAX_CMD_RETRY;
2547 int err;
2548
2549 do
2550 {
2551 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2552 mbox->cmd.command = X25_READ_STATISTICS;
2553 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2554 } while (err && retry-- && x25_error(card, err, X25_READ_STATISTICS, 0)) ;
2555
2556 if (!err)
2557 {
2558 TX25Stats* stats = (void*)mbox->data;
2559
2560 card->wandev.stats.rx_packets = stats->rxData;
2561 card->wandev.stats.tx_packets = stats->txData;
2562 }
2563 return err;
2564}
2565
2566/*====================================================================
2567 * Close HDLC link.
2568 *===================================================================*/
2569
2570static int x25_close_hdlc (sdla_t* card)
2571{
2572 TX25Mbox* mbox = card->mbox;
2573 int retry = MAX_CMD_RETRY;
2574 int err;
2575
2576 do
2577 {
2578 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2579 mbox->cmd.command = X25_HDLC_LINK_CLOSE;
2580 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2581 } while (err && retry-- && x25_error(card, err, X25_HDLC_LINK_CLOSE, 0));
2582
2583 return err;
2584}
2585
2586
2587/*====================================================================
2588 * Open HDLC link.
2589 *===================================================================*/
2590
2591static int x25_open_hdlc (sdla_t* card)
2592{
2593 TX25Mbox* mbox = card->mbox;
2594 int retry = MAX_CMD_RETRY;
2595 int err;
2596
2597 do
2598 {
2599 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2600 mbox->cmd.command = X25_HDLC_LINK_OPEN;
2601 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2602 } while (err && retry-- && x25_error(card, err, X25_HDLC_LINK_OPEN, 0));
2603
2604 return err;
2605}
2606
2607/*=====================================================================
2608 * Setup HDLC link.
2609 *====================================================================*/
2610static int x25_setup_hdlc (sdla_t* card)
2611{
2612 TX25Mbox* mbox = card->mbox;
2613 int retry = MAX_CMD_RETRY;
2614 int err;
2615
2616 do
2617 {
2618 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2619 mbox->cmd.command = X25_HDLC_LINK_SETUP;
2620 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2621 } while (err && retry-- && x25_error(card, err, X25_HDLC_LINK_SETUP, 0));
2622
2623 return err;
2624}
2625
2626/*====================================================================
2627 * Set (raise/drop) DTR.
2628 *===================================================================*/
2629
2630static int x25_set_dtr (sdla_t* card, int dtr)
2631{
2632 TX25Mbox* mbox = card->mbox;
2633 int retry = MAX_CMD_RETRY;
2634 int err;
2635
2636 do
2637 {
2638 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2639 mbox->data[0] = 0;
2640 mbox->data[2] = 0;
2641 mbox->data[1] = dtr ? 0x02 : 0x01;
2642 mbox->cmd.length = 3;
2643 mbox->cmd.command = X25_SET_GLOBAL_VARS;
2644 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2645 } while (err && retry-- && x25_error(card, err, X25_SET_GLOBAL_VARS, 0));
2646
2647 return err;
2648}
2649
2650/*====================================================================
2651 * Set interrupt mode.
2652 *===================================================================*/
2653
2654static int x25_set_intr_mode (sdla_t* card, int mode)
2655{
2656 TX25Mbox* mbox = card->mbox;
2657 int retry = MAX_CMD_RETRY;
2658 int err;
2659
2660 do
2661 {
2662 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2663 mbox->data[0] = mode;
2664 if (card->hw.fwid == SFID_X25_508){
2665 mbox->data[1] = card->hw.irq;
2666 mbox->data[2] = 2;
2667 mbox->cmd.length = 3;
2668 }else {
2669 mbox->cmd.length = 1;
2670 }
2671 mbox->cmd.command = X25_SET_INTERRUPT_MODE;
2672 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2673 } while (err && retry-- && x25_error(card, err, X25_SET_INTERRUPT_MODE, 0));
2674
2675 return err;
2676}
2677
2678/*====================================================================
2679 * Read X.25 channel configuration.
2680 *===================================================================*/
2681
2682static int x25_get_chan_conf (sdla_t* card, x25_channel_t* chan)
2683{
2684 TX25Mbox* mbox = card->mbox;
2685 int retry = MAX_CMD_RETRY;
2686 int lcn = chan->common.lcn;
2687 int err;
2688
2689 do{
2690 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2691 mbox->cmd.lcn = lcn;
2692 mbox->cmd.command = X25_READ_CHANNEL_CONFIG;
2693 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2694 } while (err && retry-- && x25_error(card, err, X25_READ_CHANNEL_CONFIG, lcn));
2695
2696 if (!err)
2697 {
2698 TX25Status* status = card->flags;
2699
2700 /* calculate an offset into the array of status bytes */
2701 if (card->u.x.hi_svc <= X25_MAX_CHAN){
2702
2703 chan->ch_idx = lcn - 1;
2704
2705 }else{
2706 int offset;
2707
2708 /* FIX: Apr 14 2000 : Nenad Corbic
2709 * The data field was being compared to 0x1F using
2710 * '&&' instead of '&'.
2711 * This caused X25API to fail for LCNs greater than 255.
2712 */
2713 switch (mbox->data[0] & 0x1F)
2714 {
2715 case 0x01:
2716 offset = status->pvc_map; break;
2717 case 0x03:
2718 offset = status->icc_map; break;
2719 case 0x07:
2720 offset = status->twc_map; break;
2721 case 0x0B:
2722 offset = status->ogc_map; break;
2723 default:
2724 offset = 0;
2725 }
2726 chan->ch_idx = lcn - 1 - offset;
2727 }
2728
2729 /* get actual transmit packet size on this channel */
2730 switch(mbox->data[1] & 0x38)
2731 {
2732 case 0x00:
2733 chan->tx_pkt_size = 16;
2734 break;
2735 case 0x08:
2736 chan->tx_pkt_size = 32;
2737 break;
2738 case 0x10:
2739 chan->tx_pkt_size = 64;
2740 break;
2741 case 0x18:
2742 chan->tx_pkt_size = 128;
2743 break;
2744 case 0x20:
2745 chan->tx_pkt_size = 256;
2746 break;
2747 case 0x28:
2748 chan->tx_pkt_size = 512;
2749 break;
2750 case 0x30:
2751 chan->tx_pkt_size = 1024;
2752 break;
2753 }
2754 if (card->u.x.logging)
2755 printk(KERN_INFO "%s: X.25 packet size on LCN %d is %d.\n",
2756 card->devname, lcn, chan->tx_pkt_size);
2757 }
2758 return err;
2759}
2760
2761/*====================================================================
2762 * Place X.25 call.
2763 *====================================================================*/
2764
2765static int x25_place_call (sdla_t* card, x25_channel_t* chan)
2766{
2767 TX25Mbox* mbox = card->mbox;
2768 int retry = MAX_CMD_RETRY;
2769 int err;
2770 char str[64];
2771
2772
2773 if (chan->protocol == htons(ETH_P_IP)){
2774 sprintf(str, "-d%s -uCC", chan->addr);
2775
2776 }else if (chan->protocol == htons(ETH_P_IPX)){
2777 sprintf(str, "-d%s -u800000008137", chan->addr);
2778
2779 }
2780
2781 do
2782 {
2783 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2784 strcpy(mbox->data, str);
2785 mbox->cmd.length = strlen(str);
2786 mbox->cmd.command = X25_PLACE_CALL;
2787 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2788 } while (err && retry-- && x25_error(card, err, X25_PLACE_CALL, 0));
2789
2790 if (!err){
2791 bind_lcn_to_dev (card, chan->dev, mbox->cmd.lcn);
2792 }
2793 return err;
2794}
2795
2796/*====================================================================
2797 * Accept X.25 call.
2798 *====================================================================*/
2799
2800static int x25_accept_call (sdla_t* card, int lcn, int qdm)
2801{
2802 TX25Mbox* mbox = card->mbox;
2803 int retry = MAX_CMD_RETRY;
2804 int err;
2805
2806 do
2807 {
2808 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2809 mbox->cmd.lcn = lcn;
2810 mbox->cmd.qdm = qdm;
2811 mbox->cmd.command = X25_ACCEPT_CALL;
2812 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2813 } while (err && retry-- && x25_error(card, err, X25_ACCEPT_CALL, lcn));
2814
2815 return err;
2816}
2817
2818/*====================================================================
2819 * Clear X.25 call.
2820 *====================================================================*/
2821
2822static int x25_clear_call (sdla_t* card, int lcn, int cause, int diagn)
2823{
2824 TX25Mbox* mbox = card->mbox;
2825 int retry = MAX_CMD_RETRY;
2826 int err;
2827
2828 do
2829 {
2830 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2831 mbox->cmd.lcn = lcn;
2832 mbox->cmd.cause = cause;
2833 mbox->cmd.diagn = diagn;
2834 mbox->cmd.command = X25_CLEAR_CALL;
2835 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2836 } while (err && retry-- && x25_error(card, err, X25_CLEAR_CALL, lcn));
2837
2838 return err;
2839}
2840
2841/*====================================================================
2842 * Send X.25 data packet.
2843 *====================================================================*/
2844
2845static int x25_send (sdla_t* card, int lcn, int qdm, int len, void* buf)
2846{
2847 TX25Mbox* mbox = card->mbox;
2848 int retry = MAX_CMD_RETRY;
2849 int err;
2850 unsigned char cmd;
2851
2852 if (card->u.x.LAPB_hdlc)
2853 cmd = X25_HDLC_WRITE;
2854 else
2855 cmd = X25_WRITE;
2856
2857 do
2858 {
2859 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2860 memcpy(mbox->data, buf, len);
2861 mbox->cmd.length = len;
2862 mbox->cmd.lcn = lcn;
2863
2864 if (card->u.x.LAPB_hdlc){
2865 mbox->cmd.pf = qdm;
2866 }else{
2867 mbox->cmd.qdm = qdm;
2868 }
2869
2870 mbox->cmd.command = cmd;
2871 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2872 } while (err && retry-- && x25_error(card, err, cmd , lcn));
2873
2874
2875	/* If buffers are busy, the return code for LAPB HDLC is
2876	 * 1.  The callers above look for a return code of
2877	 * X25RES_NOT_READY when busy. */
2878
2879 if (card->u.x.LAPB_hdlc && err == 1){
2880 err = X25RES_NOT_READY;
2881 }
2882
2883 return err;
2884}
2885
2886/*====================================================================
2887 * Fetch X.25 asynchronous events.
2888 *===================================================================*/
2889
2890static int x25_fetch_events (sdla_t* card)
2891{
2892 TX25Status* status = card->flags;
2893 TX25Mbox* mbox = card->mbox;
2894 int err = 0;
2895
2896 if (status->gflags & 0x20)
2897 {
2898 memset(&mbox->cmd, 0, sizeof(TX25Cmd));
2899 mbox->cmd.command = X25_IS_DATA_AVAILABLE;
2900 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
2901 if (err) x25_error(card, err, X25_IS_DATA_AVAILABLE, 0);
2902 }
2903 return err;
2904}
2905
2906/*====================================================================
2907 * X.25 asynchronous event/error handler.
2908 *	This routine is called each time an interface command returns a
2909 *	non-zero return code, to handle X.25 asynchronous events and
2910 *	common errors. Return non-zero to repeat the command or zero to
2911 * cancel it.
2912 *
2913 * Notes:
2914 * 1. This function may be called recursively, as handling some of the
2915 * asynchronous events (e.g. call request) requires execution of the
2916 * interface command(s) that, in turn, may also return asynchronous
2917 * events. To avoid re-entrancy problems we copy mailbox to dynamically
2918 * allocated memory before processing events.
2919 *====================================================================*/
2920
2921static int x25_error (sdla_t* card, int err, int cmd, int lcn)
2922{
2923 int retry = 1;
2924 unsigned dlen = ((TX25Mbox*)card->mbox)->cmd.length;
2925 TX25Mbox* mb;
2926
2927 mb = kmalloc(sizeof(TX25Mbox) + dlen, GFP_ATOMIC);
2928 if (mb == NULL)
2929 {
2930 printk(KERN_ERR "%s: x25_error() out of memory!\n",
2931 card->devname);
2932 return 0;
2933 }
2934 memcpy(mb, card->mbox, sizeof(TX25Mbox) + dlen);
2935 switch (err){
2936
2937 case X25RES_ASYNC_PACKET: /* X.25 asynchronous packet was received */
2938
2939 mb->data[dlen] = '\0';
2940
2941 switch (mb->cmd.pktType & 0x7F){
2942
2943 case ASE_CALL_RQST: /* incoming call */
2944 retry = incoming_call(card, cmd, lcn, mb);
2945 break;
2946
2947 case ASE_CALL_ACCEPTED: /* connected */
2948 retry = call_accepted(card, cmd, lcn, mb);
2949 break;
2950
2951 case ASE_CLEAR_RQST: /* call clear request */
2952 retry = call_cleared(card, cmd, lcn, mb);
2953 break;
2954
2955 case ASE_RESET_RQST: /* reset request */
2956 printk(KERN_INFO "%s: X.25 reset request on LCN %d! "
2957 "Cause:0x%02X Diagn:0x%02X\n",
2958 card->devname, mb->cmd.lcn, mb->cmd.cause,
2959 mb->cmd.diagn);
2960 api_oob_event (card,mb);
2961 break;
2962
2963 case ASE_RESTART_RQST: /* restart request */
2964 retry = restart_event(card, cmd, lcn, mb);
2965 break;
2966
2967 case ASE_CLEAR_CONFRM:
2968 if (clear_confirm_event (card,mb))
2969 break;
2970
2971 /* I use the goto statement here so if
2972 * somebody inserts code between the
2973 * case and default, we will not have
2974 * ghost problems */
2975
2976 goto dflt_1;
2977
2978 default:
2979dflt_1:
2980 printk(KERN_INFO "%s: X.25 event 0x%02X on LCN %d! "
2981 "Cause:0x%02X Diagn:0x%02X\n",
2982 card->devname, mb->cmd.pktType,
2983 mb->cmd.lcn, mb->cmd.cause, mb->cmd.diagn);
2984 }
2985 break;
2986
2987 case X25RES_PROTO_VIOLATION: /* X.25 protocol violation indication */
2988
2989 /* Bug Fix: Mar 14 2000
2990 * The Protocol violation error conditions were
2991 * not handled previously */
2992
2993 switch (mb->cmd.pktType & 0x7F){
2994
2995 case PVE_CLEAR_RQST: /* Clear request */
2996 retry = call_cleared(card, cmd, lcn, mb);
2997 break;
2998
2999 case PVE_RESET_RQST: /* Reset request */
3000 printk(KERN_INFO "%s: X.25 reset request on LCN %d! "
3001 "Cause:0x%02X Diagn:0x%02X\n",
3002 card->devname, mb->cmd.lcn, mb->cmd.cause,
3003 mb->cmd.diagn);
3004 api_oob_event (card,mb);
3005 break;
3006
3007 case PVE_RESTART_RQST: /* Restart request */
3008 retry = restart_event(card, cmd, lcn, mb);
3009 break;
3010
3011 default :
3012 printk(KERN_INFO
3013 "%s: X.25 protocol violation on LCN %d! "
3014 "Packet:0x%02X Cause:0x%02X Diagn:0x%02X\n",
3015 card->devname, mb->cmd.lcn,
3016 mb->cmd.pktType & 0x7F, mb->cmd.cause, mb->cmd.diagn);
3017 api_oob_event(card,mb);
3018 }
3019 break;
3020
3021 case 0x42: /* X.25 timeout */
3022 retry = timeout_event(card, cmd, lcn, mb);
3023 break;
3024
3025 case 0x43: /* X.25 retry limit exceeded */
3026 printk(KERN_INFO
3027 "%s: exceeded X.25 retry limit on LCN %d! "
3028 "Packet:0x%02X Diagn:0x%02X\n", card->devname,
3029 mb->cmd.lcn, mb->cmd.pktType, mb->cmd.diagn)
3030 ;
3031 break;
3032
3033 case 0x08: /* modem failure */
3034#ifndef MODEM_NOT_LOG
3035 printk(KERN_INFO "%s: modem failure!\n", card->devname);
3036#endif /* MODEM_NOT_LOG */
3037 api_oob_event(card,mb);
3038 break;
3039
3040 case 0x09: /* N2 retry limit */
3041 printk(KERN_INFO "%s: exceeded HDLC retry limit!\n",
3042 card->devname);
3043 api_oob_event(card,mb);
3044 break;
3045
3046 case 0x06: /* unnumbered frame was received while in ABM */
3047 printk(KERN_INFO "%s: received Unnumbered frame 0x%02X!\n",
3048 card->devname, mb->data[0]);
3049 api_oob_event(card,mb);
3050 break;
3051
3052 case CMD_TIMEOUT:
3053 printk(KERN_ERR "%s: command 0x%02X timed out!\n",
3054 card->devname, cmd)
3055 ;
3056 retry = 0; /* abort command */
3057 break;
3058
3059 case X25RES_NOT_READY:
3060 retry = 1;
3061 break;
3062
3063 case 0x01:
3064 if (card->u.x.LAPB_hdlc)
3065 break;
3066
3067 if (mb->cmd.command == 0x16)
3068 break;
3069 /* I use the goto statement here so if
3070 * somebody inserts code between the
3071 * case and default, we will not have
3072 * ghost problems */
3073 goto dflt_2;
3074
3075 default:
3076dflt_2:
3077 printk(KERN_INFO "%s: command 0x%02X returned 0x%02X! Lcn %i\n",
3078 card->devname, cmd, err, mb->cmd.lcn)
3079 ;
3080 retry = 0; /* abort command */
3081 }
3082 kfree(mb);
3083 return retry;
3084}
3085
3086/*====================================================================
3087 * X.25 Asynchronous Event Handlers
3088 *	These functions are called by x25_error() and should return 0 if
3089 * the command resulting in the asynchronous event must be aborted.
3090 *====================================================================*/
3091
3092
3093
3094/*====================================================================
3095 * Handle X.25 incoming call request.
3096 *	RFC 1356 establishes the following rules:
3097 *	1. The first octet in the Call User Data (CUD) field of the call
3098 *	   request packet contains the NLPID identifying the protocol encapsulation.
3099 *	2. Calls MUST NOT be accepted unless the router supports the requested
3100 *	   protocol encapsulation.
3101 * 3. A diagnostic code 249 defined by ISO/IEC 8208 may be used
3102 * when clearing a call because protocol encapsulation is not
3103 * supported.
3104 * 4. If an incoming call is received while a call request is
3105 * pending (i.e. call collision has occurred), the incoming call
3106 * shall be rejected and call request shall be retried.
3107 *====================================================================*/
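/*
 * Illustrative sketch only: assuming the usual NLPID values, the CUD
 * check performed by incoming_call() below reduces to roughly:
 *
 *	user_data = hex_to_uint(info->user, 2);    (first CUD octet)
 *	NLPID_IP    -> accept on a channel configured for ETH_P_IP
 *	NLPID_SNAP  -> accept on a channel configured for ETH_P_IPX
 *	anything else -> x25_clear_call(card, lcn, 0, 249)
 *
 * where 249 is the ISO/IEC 8208 diagnostic for unsupported
 * encapsulation mentioned in rule 3 above.
 */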
3108
3109static int incoming_call (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
3110{
3111 struct wan_device* wandev = &card->wandev;
3112 int new_lcn = mb->cmd.lcn;
3113 struct net_device* dev = get_dev_by_lcn(wandev, new_lcn);
3114 x25_channel_t* chan = NULL;
3115 int accept = 0; /* set to '1' if o.k. to accept call */
3116 unsigned int user_data;
3117 x25_call_info_t* info;
3118
3119 /* Make sure there is no call collision */
3120 if (dev != NULL)
3121 {
3122 printk(KERN_INFO
3123 "%s: X.25 incoming call collision on LCN %d!\n",
3124 card->devname, new_lcn);
3125
3126 x25_clear_call(card, new_lcn, 0, 0);
3127 return 1;
3128 }
3129
3130 /* Make sure D bit is not set in call request */
3131//FIXME: THIS IS NOT TRUE !!!! TAKE IT OUT
3132// if (mb->cmd.qdm & 0x02)
3133// {
3134// printk(KERN_INFO
3135// "%s: X.25 incoming call on LCN %d with D-bit set!\n",
3136// card->devname, new_lcn);
3137//
3138// x25_clear_call(card, new_lcn, 0, 0);
3139// return 1;
3140// }
3141
3142 /* Parse call request data */
3143 info = kmalloc(sizeof(x25_call_info_t), GFP_ATOMIC);
3144 if (info == NULL)
3145 {
3146 printk(KERN_ERR
3147 "%s: not enough memory to parse X.25 incoming call "
3148 "on LCN %d!\n", card->devname, new_lcn);
3149 x25_clear_call(card, new_lcn, 0, 0);
3150 return 1;
3151 }
3152
3153 parse_call_info(mb->data, info);
3154
3155 if (card->u.x.logging)
3156 printk(KERN_INFO "\n%s: X.25 incoming call on LCN %d!\n",
3157 card->devname, new_lcn);
3158
3159	/* Convert the first two ASCII characters into an
3160	 * integer. Used to check the incoming protocol.
3161 */
3162 user_data = hex_to_uint(info->user,2);
3163
3164 /* Find available channel */
3165 for (dev = wandev->dev; dev; dev = *((struct net_device **)dev->priv)) {
3166 chan = dev->priv;
3167
3168 if (chan->common.usedby == API)
3169 continue;
3170
3171 if (!chan->common.svc || (chan->common.state != WAN_DISCONNECTED))
3172 continue;
3173
3174 if (user_data == NLPID_IP && chan->protocol != htons(ETH_P_IP)){
3175 printk(KERN_INFO "IP packet but configured for IPX : %x, %x\n",
3176 htons(chan->protocol), info->user[0]);
3177 continue;
3178 }
3179
3180 if (user_data == NLPID_SNAP && chan->protocol != htons(ETH_P_IPX)){
3181 printk(KERN_INFO "IPX packet but configured for IP: %x\n",
3182 htons(chan->protocol));
3183 continue;
3184 }
3185 if (strcmp(info->src, chan->addr) == 0)
3186 break;
3187
3188 /* If just an '@' is specified, accept all incoming calls */
3189 if (strcmp(chan->addr, "") == 0)
3190 break;
3191 }
3192
3193 if (dev == NULL){
3194
3195		/* If the call is not for any WANPIPE interface,
3196		 * check to see if there is an API listening queue
3197		 * waiting for data. If there is, send the packet
3198		 * up the stack.
3199 */
3200 if (card->sk != NULL && card->func != NULL){
3201 if (api_incoming_call(card,mb,new_lcn)){
3202 x25_clear_call(card, new_lcn, 0, 0);
3203 }
3204 accept = 0;
3205 }else{
3206 printk(KERN_INFO "%s: no channels available!\n",
3207 card->devname);
3208
3209 x25_clear_call(card, new_lcn, 0, 0);
3210 }
3211
3212 }else if (info->nuser == 0){
3213
3214 printk(KERN_INFO
3215 "%s: no user data in incoming call on LCN %d!\n",
3216 card->devname, new_lcn)
3217 ;
3218 x25_clear_call(card, new_lcn, 0, 0);
3219
3220 }else switch (info->user[0]){
3221
3222 case 0: /* multiplexed */
3223 chan->protocol = htons(0);
3224 accept = 1;
3225 break;
3226
3227 case NLPID_IP: /* IP datagrams */
3228 accept = 1;
3229 break;
3230
3231 case NLPID_SNAP: /* IPX datagrams */
3232 accept = 1;
3233 break;
3234
3235 default:
3236 printk(KERN_INFO
3237 "%s: unsupported NLPID 0x%02X in incoming call "
3238 "on LCN %d!\n", card->devname, info->user[0], new_lcn);
3239 x25_clear_call(card, new_lcn, 0, 249);
3240 }
3241
3242 if (accept && (x25_accept_call(card, new_lcn, 0) == CMD_OK)){
3243
3244 bind_lcn_to_dev (card, chan->dev, new_lcn);
3245
3246 if (x25_get_chan_conf(card, chan) == CMD_OK)
3247 set_chan_state(dev, WAN_CONNECTED);
3248 else
3249 x25_clear_call(card, new_lcn, 0, 0);
3250 }
3251 kfree(info);
3252 return 1;
3253}
3254
3255/*====================================================================
3256 * Handle accepted call.
3257 *====================================================================*/
3258
3259static int call_accepted (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
3260{
3261 unsigned new_lcn = mb->cmd.lcn;
3262 struct net_device* dev = find_channel(card, new_lcn);
3263 x25_channel_t* chan;
3264
3265 if (dev == NULL){
3266 printk(KERN_INFO
3267 "%s: clearing orphaned connection on LCN %d!\n",
3268 card->devname, new_lcn);
3269 x25_clear_call(card, new_lcn, 0, 0);
3270 return 1;
3271 }
3272
3273 if (card->u.x.logging)
3274 printk(KERN_INFO "%s: X.25 call accepted on Dev %s and LCN %d!\n",
3275 card->devname, dev->name, new_lcn);
3276
3277 /* Get channel configuration and notify router */
3278 chan = dev->priv;
3279 if (x25_get_chan_conf(card, chan) != CMD_OK)
3280 {
3281 x25_clear_call(card, new_lcn, 0, 0);
3282 return 1;
3283 }
3284
3285 set_chan_state(dev, WAN_CONNECTED);
3286
3287 if (chan->common.usedby == API){
3288 send_delayed_cmd_result(card,dev,mb);
3289 bind_lcn_to_dev (card, dev, new_lcn);
3290 }
3291
3292 return 1;
3293}
3294
3295/*====================================================================
3296 * Handle cleared call.
3297 *====================================================================*/
3298
3299static int call_cleared (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
3300{
3301 unsigned new_lcn = mb->cmd.lcn;
3302 struct net_device* dev = find_channel(card, new_lcn);
3303 x25_channel_t *chan;
3304 unsigned char old_state;
3305
3306 if (card->u.x.logging){
3307 printk(KERN_INFO "%s: X.25 clear request on LCN %d! Cause:0x%02X "
3308 "Diagn:0x%02X\n",
3309 card->devname, new_lcn, mb->cmd.cause, mb->cmd.diagn);
3310 }
3311
3312 if (dev == NULL){
3313 printk(KERN_INFO "%s: X.25 clear request : No device for clear\n",
3314 card->devname);
3315 return 1;
3316 }
3317
3318 chan=dev->priv;
3319
3320 old_state = chan->common.state;
3321
3322 set_chan_state(dev, WAN_DISCONNECTED);
3323
3324 if (chan->common.usedby == API){
3325
3326 switch (old_state){
3327
3328 case WAN_CONNECTING:
3329 send_delayed_cmd_result(card,dev,mb);
3330 break;
3331 case WAN_CONNECTED:
3332 send_oob_msg(card,dev,mb);
3333 break;
3334 }
3335 }
3336
3337 return ((cmd == X25_WRITE) && (lcn == new_lcn)) ? 0 : 1;
3338}
3339
3340/*====================================================================
3341 * Handle X.25 restart event.
3342 *====================================================================*/
3343
3344static int restart_event (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
3345{
3346 struct wan_device* wandev = &card->wandev;
3347 struct net_device* dev;
3348 x25_channel_t *chan;
3349 unsigned char old_state;
3350
3351 printk(KERN_INFO
3352 "%s: X.25 restart request! Cause:0x%02X Diagn:0x%02X\n",
3353 card->devname, mb->cmd.cause, mb->cmd.diagn);
3354
3355 /* down all logical channels */
3356 for (dev = wandev->dev; dev; dev = *((struct net_device **)dev->priv)) {
3357 chan=dev->priv;
3358 old_state = chan->common.state;
3359
3360 set_chan_state(dev, WAN_DISCONNECTED);
3361
3362 if (chan->common.usedby == API){
3363 switch (old_state){
3364
3365 case WAN_CONNECTING:
3366 send_delayed_cmd_result(card,dev,mb);
3367 break;
3368 case WAN_CONNECTED:
3369 send_oob_msg(card,dev,mb);
3370 break;
3371 }
3372 }
3373 }
3374 return (cmd == X25_WRITE) ? 0 : 1;
3375}
3376
3377/*====================================================================
3378 * Handle timeout event.
3379 *====================================================================*/
3380
3381static int timeout_event (sdla_t* card, int cmd, int lcn, TX25Mbox* mb)
3382{
3383 unsigned new_lcn = mb->cmd.lcn;
3384
3385 if (mb->cmd.pktType == 0x05) /* call request time out */
3386 {
3387 struct net_device* dev = find_channel(card,new_lcn);
3388
3389		printk(KERN_INFO "%s: X.25 call timed out on LCN %d!\n",
3390 card->devname, new_lcn);
3391
3392 if (dev){
3393 x25_channel_t *chan = dev->priv;
3394 set_chan_state(dev, WAN_DISCONNECTED);
3395
3396 if (chan->common.usedby == API){
3397 send_delayed_cmd_result(card,dev,card->mbox);
3398 }
3399 }
3400 }else{
3401 printk(KERN_INFO "%s: X.25 packet 0x%02X timeout on LCN %d!\n",
3402 card->devname, mb->cmd.pktType, new_lcn);
3403 }
3404 return 1;
3405}
3406
3407/*
3408 * Miscellaneous
3409 */
3410
3411/*====================================================================
3412 * Establish physical connection.
3413 * o open HDLC and raise DTR
3414 *
3415 * Return: 0 connection established
3416 * 1 connection is in progress
3417 * <0 error
3418 *===================================================================*/
3419
3420static int connect (sdla_t* card)
3421{
3422 TX25Status* status = card->flags;
3423
3424 if (x25_open_hdlc(card) || x25_setup_hdlc(card))
3425 return -EIO;
3426
3427 wanpipe_set_state(card, WAN_CONNECTING);
3428
3429 x25_set_intr_mode(card, INTR_ON_TIMER);
3430 status->imask &= ~INTR_ON_TIMER;
3431
3432 return 1;
3433}
3434
3435/*
3436 * Tear down physical connection.
3437 * o close HDLC link
3438 * o drop DTR
3439 *
3440 * Return: 0
3441 * <0 error
3442 */
3443
3444static int disconnect (sdla_t* card)
3445{
3446 wanpipe_set_state(card, WAN_DISCONNECTED);
3447 x25_set_intr_mode(card, INTR_ON_TIMER); /* disable all interrupt except timer */
3448 x25_close_hdlc(card); /* close HDLC link */
3449 x25_set_dtr(card, 0); /* drop DTR */
3450 return 0;
3451}
3452
3453/*
3454 * Find network device by its channel number.
3455 */
3456
3457static struct net_device* get_dev_by_lcn(struct wan_device* wandev,
3458 unsigned lcn)
3459{
3460 struct net_device* dev;
3461
3462 for (dev = wandev->dev; dev; dev = *((struct net_device **)dev->priv))
3463 if (((x25_channel_t*)dev->priv)->common.lcn == lcn)
3464 break;
3465 return dev;
3466}
3467
3468/*
3469 * Initiate connection on the logical channel.
3470 * o for PVC we just get channel configuration
3471 * o for SVCs place an X.25 call
3472 *
3473 * Return: 0 connected
3474 * >0 connection in progress
3475 * <0 failure
3476 */
3477
3478static int chan_connect(struct net_device* dev)
3479{
3480 x25_channel_t* chan = dev->priv;
3481 sdla_t* card = chan->card;
3482
3483 if (chan->common.svc && chan->common.usedby == WANPIPE){
3484 if (!chan->addr[0]){
3485 printk(KERN_INFO "%s: No Destination Address\n",
3486 card->devname);
3487 return -EINVAL; /* no destination address */
3488 }
3489 printk(KERN_INFO "%s: placing X.25 call to %s ...\n",
3490 card->devname, chan->addr);
3491
3492 if (x25_place_call(card, chan) != CMD_OK)
3493 return -EIO;
3494
3495 set_chan_state(dev, WAN_CONNECTING);
3496 return 1;
3497 }else{
3498 if (x25_get_chan_conf(card, chan) != CMD_OK)
3499 return -EIO;
3500
3501 set_chan_state(dev, WAN_CONNECTED);
3502 }
3503 return 0;
3504}
3505
3506/*
3507 * Disconnect logical channel.
3508 * o if SVC then clear X.25 call
3509 */
3510
3511static int chan_disc(struct net_device* dev)
3512{
3513 x25_channel_t* chan = dev->priv;
3514
3515 if (chan->common.svc){
3516 x25_clear_call(chan->card, chan->common.lcn, 0, 0);
3517
3518 /* For API we disconnect on clear
3519 * confirmation.
3520 */
3521 if (chan->common.usedby == API)
3522 return 0;
3523 }
3524
3525 set_chan_state(dev, WAN_DISCONNECTED);
3526
3527 return 0;
3528}
3529
3530/*
3531 * Set logical channel state.
3532 */
3533
3534static void set_chan_state(struct net_device* dev, int state)
3535{
3536 x25_channel_t* chan = dev->priv;
3537 sdla_t* card = chan->card;
3538 unsigned long flags;
3539
3540 save_flags(flags);
3541 cli();
3542 if (chan->common.state != state)
3543 {
3544 switch (state)
3545 {
3546 case WAN_CONNECTED:
3547 if (card->u.x.logging){
3548 printk (KERN_INFO
3549 "%s: interface %s connected, lcn %i !\n",
3550 card->devname, dev->name,chan->common.lcn);
3551 }
3552 *(unsigned short*)dev->dev_addr = htons(chan->common.lcn);
3553 chan->i_timeout_sofar = jiffies;
3554
3555 /* LAPB is PVC Based */
3556 if (card->u.x.LAPB_hdlc)
3557 chan->common.svc=0;
3558 break;
3559
3560 case WAN_CONNECTING:
3561 if (card->u.x.logging){
3562 printk (KERN_INFO
3563 "%s: interface %s connecting, lcn %i ...\n",
3564 card->devname, dev->name, chan->common.lcn);
3565 }
3566 break;
3567
3568 case WAN_DISCONNECTED:
3569 if (card->u.x.logging){
3570 printk (KERN_INFO
3571 "%s: interface %s disconnected, lcn %i !\n",
3572 card->devname, dev->name,chan->common.lcn);
3573 }
3574 atomic_set(&chan->common.disconnect,0);
3575
3576 if (chan->common.svc) {
3577 *(unsigned short*)dev->dev_addr = 0;
3578 card->u.x.svc_to_dev_map[(chan->common.lcn%X25_MAX_CHAN)]=NULL;
3579 chan->common.lcn = 0;
3580 }
3581
3582 if (chan->transmit_length){
3583 chan->transmit_length=0;
3584 atomic_set(&chan->common.driver_busy,0);
3585 chan->tx_offset=0;
3586 if (netif_queue_stopped(dev)){
3587 netif_wake_queue(dev);
3588 }
3589 }
3590 atomic_set(&chan->common.command,0);
3591 break;
3592
3593 case WAN_DISCONNECTING:
3594 if (card->u.x.logging){
3595 printk (KERN_INFO
3596 "\n%s: interface %s disconnecting, lcn %i ...\n",
3597 card->devname, dev->name,chan->common.lcn);
3598 }
3599 atomic_set(&chan->common.disconnect,0);
3600 break;
3601 }
3602 chan->common.state = state;
3603 }
3604 chan->state_tick = jiffies;
3605 restore_flags(flags);
3606}
3607
3608/*
3609 * Send packet on a logical channel.
3610 * When this function is called, tx_skb field of the channel data
3611 * space points to the transmit socket buffer. When transmission
3612 * is complete, release socket buffer and reset 'tbusy' flag.
3613 *
3614 * Return: 0 - transmission complete
3615 * 1 - busy
3616 *
3617 * Notes:
3618 * 1. If the packet length is greater than the MTU for this channel, we'll
3619 *    fragment the packet into a 'complete packet sequence' using the M bit.
3620 * 2. When transmission is complete, an event notification should be issued
3621 * to the router.
3622 */
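/*
 * Worked example (illustrative only, assuming chan->tx_pkt_size == 128):
 * a 300 byte payload is sent as three X.25 packets of 128, 128 and 44
 * bytes.  The first two carry the M (more data) bit, chan->tx_offset is
 * advanced by the length sent each time, and the remaining fragments go
 * out from the transmit interrupt until tx_offset reaches the original
 * length, at which point the final packet is sent with the M bit clear.
 */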
3623
3624static int chan_send(struct net_device* dev, void* buff, unsigned data_len,
3625 unsigned char tx_intr)
3626{
3627 x25_channel_t* chan = dev->priv;
3628 sdla_t* card = chan->card;
3629 TX25Status* status = card->flags;
3630 unsigned len=0, qdm=0, res=0, orig_len = 0;
3631 void *data;
3632
3633 /* Check to see if channel is ready */
3634 if ((!(status->cflags[chan->ch_idx] & 0x40) && !card->u.x.LAPB_hdlc) ||
3635 !(*card->u.x.hdlc_buf_status & 0x40)){
3636
3637 if (!tx_intr){
3638 setup_for_delayed_transmit (dev, buff, data_len);
3639 return 0;
3640 }else{
3641 /* By returning 0 to tx_intr the packet will be dropped */
3642 ++card->wandev.stats.tx_dropped;
3643 ++chan->ifstats.tx_dropped;
3644 printk(KERN_INFO "%s: ERROR, Tx intr could not send, dropping %s:\n",
3645 card->devname,dev->name);
3646 ++chan->if_send_stat.if_send_bfr_not_passed_to_adptr;
3647 return 0;
3648 }
3649 }
3650
3651 if (chan->common.usedby == API){
3652 /* Remove the API Header */
3653 x25api_hdr_t *api_data = (x25api_hdr_t *)buff;
3654
3655 /* Set the qdm bits from the packet header
3656 * User has the option to set the qdm bits
3657 */
3658 qdm = api_data->qdm;
3659
3660 orig_len = len = data_len - sizeof(x25api_hdr_t);
3661 data = (unsigned char*)buff + sizeof(x25api_hdr_t);
3662 }else{
3663 data = buff;
3664 orig_len = len = data_len;
3665 }
3666
3667 if (tx_intr){
3668		/* We are in tx_intr, so subtract tx_offset from
3669		 * the total length. The tx_offset part of the
3670		 * data has already been sent. Also, move the
3671		 * data pointer to the proper offset location.
3672 */
3673 len -= chan->tx_offset;
3674 data = (unsigned char*)data + chan->tx_offset;
3675 }
3676
3677 /* Check if the packet length is greater than MTU
3678 * If YES: Cut the len to MTU and set the M bit
3679 */
3680 if (len > chan->tx_pkt_size && !card->u.x.LAPB_hdlc){
3681 len = chan->tx_pkt_size;
3682 qdm |= M_BIT;
3683 }
3684
3685
3686	/* Pass only the first three bits of the qdm byte to the send
3687	 * routine, in case the user sets any other bit, which might
3688	 * cause errors.
3689 */
3690
3691 switch(x25_send(card, chan->common.lcn, (qdm&0x07), len, data)){
3692 case 0x00: /* success */
3693 chan->i_timeout_sofar = jiffies;
3694
3695 dev->trans_start=jiffies;
3696
3697 if ((qdm & M_BIT) && !card->u.x.LAPB_hdlc){
3698 if (!tx_intr){
3699 /* The M bit was set, which means that part of the
3700 * packet has been sent. Copy the packet into a buffer
3701				 * and set the offset to len, so that on the next tx_intr
3702				 * the packet will be sent starting at this offset.
3703 */
3704 chan->tx_offset += len;
3705
3706 ++chan->ifstats.tx_packets;
3707 chan->ifstats.tx_bytes += len;
3708
3709 if (chan->tx_offset < orig_len){
3710 setup_for_delayed_transmit (dev, buff, data_len);
3711 }
3712 res=0;
3713 }else{
3714				/* We are already in tx_intr, thus data is already
3715 * in the buffer. Update the offset and wait for
3716 * next tx_intr. We add on to the offset, since data can
3717 * be X number of times larger than max data size.
3718 */
3719 ++chan->ifstats.tx_packets;
3720 chan->ifstats.tx_bytes += len;
3721
3722 ++chan->if_send_stat.if_send_bfr_passed_to_adptr;
3723 chan->tx_offset += len;
3724
3725 /* The user can set the qdm bit as well.
3726 * If the entire packet was sent and qdm is still
3727				 * set, then it's the user who has set the M bit. In that
3728				 * case, indicate that the packet was sent by returning
3729 * 0 and wait for a new packet. Otherwise, wait for next
3730 * tx interrupt to send the rest of the packet */
3731
3732 if (chan->tx_offset < orig_len){
3733 res=1;
3734 }else{
3735 res=0;
3736 }
3737 }
3738 }else{
3739 ++chan->ifstats.tx_packets;
3740 chan->ifstats.tx_bytes += len;
3741 ++chan->if_send_stat.if_send_bfr_passed_to_adptr;
3742 res=0;
3743 }
3744 break;
3745
3746 case 0x33: /* Tx busy */
3747 if (tx_intr){
3748 printk(KERN_INFO "%s: Tx_intr: Big Error dropping packet %s\n",
3749 card->devname,dev->name);
3750 ++chan->ifstats.tx_dropped;
3751 ++card->wandev.stats.tx_dropped;
3752 ++chan->if_send_stat.if_send_bfr_not_passed_to_adptr;
3753 res=0;
3754 }else{
3755 DBG_PRINTK(KERN_INFO
3756			"%s: Send: Big Error should have tx: storing %s\n",
3757 card->devname,dev->name);
3758 setup_for_delayed_transmit (dev, buff, data_len);
3759 res=1;
3760 }
3761 break;
3762
3763 default: /* failure */
3764 ++chan->ifstats.tx_errors;
3765 if (tx_intr){
3766 printk(KERN_INFO "%s: Tx_intr: Failure to send, dropping %s\n",
3767 card->devname,dev->name);
3768 ++chan->ifstats.tx_dropped;
3769 ++card->wandev.stats.tx_dropped;
3770 ++chan->if_send_stat.if_send_bfr_not_passed_to_adptr;
3771 res=0;
3772 }else{
3773 DBG_PRINTK(KERN_INFO "%s: Send: Failure to send !!!, storing %s\n",
3774 card->devname,dev->name);
3775 setup_for_delayed_transmit (dev, buff, data_len);
3776 res=1;
3777 }
3778 break;
3779 }
3780 return res;
3781}
3782
3783
3784/*
3785 * Parse X.25 call request data and fill x25_call_info_t structure.
3786 */
3787
3788static void parse_call_info (unsigned char* str, x25_call_info_t* info)
3789{
3790 memset(info, 0, sizeof(x25_call_info_t));
3791 for (; *str; ++str)
3792 {
3793 int i;
3794 unsigned char ch;
3795
3796 if (*str == '-') switch (str[1]) {
3797
3798		/* Subtract 2 from the maximum size so that the
3799		 * last byte is 0. This way we can use string
3800		 * manipulation functions on the call information.
3801 */
3802
3803 case 'd': /* destination address */
3804 for (i = 0; i < (MAX_X25_ADDR_SIZE-2); ++i){
3805 ch = str[2+i];
3806 if (isspace(ch)) break;
3807 info->dest[i] = ch;
3808 }
3809 break;
3810
3811 case 's': /* source address */
3812 for (i = 0; i < (MAX_X25_ADDR_SIZE-2); ++i){
3813 ch = str[2+i];
3814 if (isspace(ch)) break;
3815 info->src[i] = ch;
3816 }
3817 break;
3818
3819 case 'u': /* user data */
3820 for (i = 0; i < (MAX_X25_DATA_SIZE-2); ++i){
3821 ch = str[2+i];
3822 if (isspace(ch)) break;
3823 info->user[i] = ch;
3824 }
3825 info->nuser = i;
3826 break;
3827
3828 case 'f': /* facilities */
3829 for (i = 0; i < (MAX_X25_FACL_SIZE-2); ++i){
3830 ch = str[2+i];
3831 if (isspace(ch)) break;
3832 info->facil[i] = ch;
3833 }
3834 info->nfacil = i;
3835 break;
3836 }
3837 }
3838}
3839
3840/*
3841 * Convert line speed in bps to a number used by S502 code.
3842 */
3843
3844static unsigned char bps_to_speed_code (unsigned long bps)
3845{
3846 unsigned char number;
3847
3848 if (bps <= 1200) number = 0x01;
3849 else if (bps <= 2400) number = 0x02;
3850 else if (bps <= 4800) number = 0x03;
3851 else if (bps <= 9600) number = 0x04;
3852 else if (bps <= 19200) number = 0x05;
3853 else if (bps <= 38400) number = 0x06;
3854 else if (bps <= 45000) number = 0x07;
3855 else if (bps <= 56000) number = 0x08;
3856 else if (bps <= 64000) number = 0x09;
3857 else if (bps <= 74000) number = 0x0A;
3858 else if (bps <= 112000) number = 0x0B;
3859 else if (bps <= 128000) number = 0x0C;
3860 else number = 0x0D;
3861
3862 return number;
3863}
3864
3865/*
3866 * Convert decimal string to unsigned integer.
3867 * If len != 0 then only 'len' characters of the string are converted.
3868 */
3869
3870static unsigned int dec_to_uint (unsigned char* str, int len)
3871{
3872 unsigned val;
3873
3874 if (!len)
3875 len = strlen(str);
3876
3877 for (val = 0; len && is_digit(*str); ++str, --len)
3878 val = (val * 10) + (*str - (unsigned)'0');
3879
3880 return val;
3881}
3882
3883/*
3884 * Convert hex string to unsigned integer.
3885 *	If len != 0 then only 'len' characters of the string are converted.
3886 */
3887
3888static unsigned int hex_to_uint (unsigned char* str, int len)
3889{
3890 unsigned val, ch;
3891
3892 if (!len)
3893 len = strlen(str);
3894
3895 for (val = 0; len; ++str, --len)
3896 {
3897 ch = *str;
3898 if (is_digit(ch))
3899 val = (val << 4) + (ch - (unsigned)'0');
3900 else if (is_hex_digit(ch))
3901 val = (val << 4) + ((ch & 0xDF) - (unsigned)'A' + 10);
3902 else break;
3903 }
3904 return val;
3905}
3906
3907
3908static int handle_IPXWAN(unsigned char *sendpacket, char *devname, unsigned char enable_IPX, unsigned long network_number, unsigned short proto)
3909{
3910 int i;
3911
3912 if( proto == ETH_P_IPX) {
3913 /* It's an IPX packet */
3914 if(!enable_IPX) {
3915 /* Return 1 so we don't pass it up the stack. */
3916 return 1;
3917 }
3918 } else {
3919 /* It's not IPX so pass it up the stack.*/
3920 return 0;
3921 }
3922
3923 if( sendpacket[16] == 0x90 &&
3924 sendpacket[17] == 0x04)
3925 {
3926 /* It's IPXWAN */
3927
3928 if( sendpacket[2] == 0x02 &&
3929 sendpacket[34] == 0x00)
3930 {
3931 /* It's a timer request packet */
3932 printk(KERN_INFO "%s: Received IPXWAN Timer Request packet\n",devname);
3933
3934 /* Go through the routing options and answer no to every
3935 * option except Unnumbered RIP/SAP
3936 */
3937 for(i = 41; sendpacket[i] == 0x00; i += 5)
3938 {
3939 /* 0x02 is the option for Unnumbered RIP/SAP */
3940 if( sendpacket[i + 4] != 0x02)
3941 {
3942 sendpacket[i + 1] = 0;
3943 }
3944 }
3945
3946 /* Skip over the extended Node ID option */
3947 if( sendpacket[i] == 0x04 )
3948 {
3949 i += 8;
3950 }
3951
3952 /* We also want to turn off all header compression opt. */
3953 for(; sendpacket[i] == 0x80 ;)
3954 {
3955 sendpacket[i + 1] = 0;
3956 i += (sendpacket[i + 2] << 8) + (sendpacket[i + 3]) + 4;
3957 }
3958
3959 /* Set the packet type to timer response */
3960 sendpacket[34] = 0x01;
3961
3962 printk(KERN_INFO "%s: Sending IPXWAN Timer Response\n",devname);
3963 }
3964 else if( sendpacket[34] == 0x02 )
3965 {
3966 /* This is an information request packet */
3967 printk(KERN_INFO "%s: Received IPXWAN Information Request packet\n",devname);
3968
3969 /* Set the packet type to information response */
3970 sendpacket[34] = 0x03;
3971
3972 /* Set the router name */
3973 sendpacket[51] = 'X';
3974 sendpacket[52] = 'T';
3975 sendpacket[53] = 'P';
3976 sendpacket[54] = 'I';
3977 sendpacket[55] = 'P';
3978 sendpacket[56] = 'E';
3979 sendpacket[57] = '-';
3980 sendpacket[58] = CVHexToAscii(network_number >> 28);
3981 sendpacket[59] = CVHexToAscii((network_number & 0x0F000000)>> 24);
3982 sendpacket[60] = CVHexToAscii((network_number & 0x00F00000)>> 20);
3983 sendpacket[61] = CVHexToAscii((network_number & 0x000F0000)>> 16);
3984 sendpacket[62] = CVHexToAscii((network_number & 0x0000F000)>> 12);
3985 sendpacket[63] = CVHexToAscii((network_number & 0x00000F00)>> 8);
3986 sendpacket[64] = CVHexToAscii((network_number & 0x000000F0)>> 4);
3987 sendpacket[65] = CVHexToAscii(network_number & 0x0000000F);
3988 for(i = 66; i < 99; i+= 1)
3989 {
3990 sendpacket[i] = 0;
3991 }
3992
3993 printk(KERN_INFO "%s: Sending IPXWAN Information Response packet\n",devname);
3994 }
3995 else
3996 {
3997 printk(KERN_INFO "%s: Unknown IPXWAN packet!\n",devname);
3998 return 0;
3999 }
4000
4001 /* Set the WNodeID to our network address */
4002 sendpacket[35] = (unsigned char)(network_number >> 24);
4003 sendpacket[36] = (unsigned char)((network_number & 0x00FF0000) >> 16);
4004 sendpacket[37] = (unsigned char)((network_number & 0x0000FF00) >> 8);
4005 sendpacket[38] = (unsigned char)(network_number & 0x000000FF);
4006
4007 return 1;
4008 } else {
4009		/* If we get here it's an IPX data packet, so it'll get passed
4010		 * up the stack. */
4011 /* switch the network numbers */
4012 switch_net_numbers(sendpacket, network_number, 1);
4013 return 0;
4014 }
4015}
4016
4017/*
4018 *	If incoming is 0 (outgoing) - if the net number is ours, make it 0.
4019 *	If incoming is 1 - if the net number is 0, make it ours.
4020 */
4021
4022static void switch_net_numbers(unsigned char *sendpacket, unsigned long network_number, unsigned char incoming)
4023{
4024 unsigned long pnetwork_number;
4025
4026 pnetwork_number = (unsigned long)((sendpacket[6] << 24) +
4027 (sendpacket[7] << 16) + (sendpacket[8] << 8) +
4028 sendpacket[9]);
4029
4030
4031 if (!incoming) {
4032 /*If the destination network number is ours, make it 0 */
4033 if( pnetwork_number == network_number) {
4034 sendpacket[6] = sendpacket[7] = sendpacket[8] =
4035 sendpacket[9] = 0x00;
4036 }
4037 } else {
4038 /* If the incoming network is 0, make it ours */
4039 if( pnetwork_number == 0) {
4040 sendpacket[6] = (unsigned char)(network_number >> 24);
4041 sendpacket[7] = (unsigned char)((network_number &
4042 0x00FF0000) >> 16);
4043 sendpacket[8] = (unsigned char)((network_number &
4044 0x0000FF00) >> 8);
4045 sendpacket[9] = (unsigned char)(network_number &
4046 0x000000FF);
4047 }
4048 }
4049
4050
4051 pnetwork_number = (unsigned long)((sendpacket[18] << 24) +
4052 (sendpacket[19] << 16) + (sendpacket[20] << 8) +
4053 sendpacket[21]);
4054
4055
4056 if( !incoming ) {
4057 /* If the source network is ours, make it 0 */
4058 if( pnetwork_number == network_number) {
4059 sendpacket[18] = sendpacket[19] = sendpacket[20] =
4060 sendpacket[21] = 0x00;
4061 }
4062 } else {
4063 /* If the source network is 0, make it ours */
4064 if( pnetwork_number == 0 ) {
4065 sendpacket[18] = (unsigned char)(network_number >> 24);
4066 sendpacket[19] = (unsigned char)((network_number &
4067 0x00FF0000) >> 16);
4068 sendpacket[20] = (unsigned char)((network_number &
4069 0x0000FF00) >> 8);
4070 sendpacket[21] = (unsigned char)(network_number &
4071 0x000000FF);
4072 }
4073 }
4074} /* switch_net_numbers */
4075
4076
4077
4078
4079/********************* X25API SPECIFIC FUNCTIONS ****************/
4080
4081
4082/*===============================================================
4083 * find_channel
4084 *
4085 * Manages the lcn to device map. It increases performance
4086 * because it eliminates the need to search through the link
4087 * list for a device which is bounded to a specific lcn.
4088 *
4089 *===============================================================*/
4090
4091
4092struct net_device *find_channel(sdla_t *card, unsigned lcn)
4093{
4094 if (card->u.x.LAPB_hdlc){
4095
4096 return card->wandev.dev;
4097
4098 }else{
4099 /* We don't know whether the incoming lcn
4100 * is a PVC or an SVC channel. But we do know that
4101 * the lcn cannot be for both the PVC and the SVC
4102 * channel.
4103
4104		 * If the lcn number is greater than or equal to 255,
4105		 * take the modulo 255 of that number. We only have
4106		 * 255 locations, thus higher numbers must be mapped
4107		 * to a number between 0 and 254.
4108
4109		 * We must separate pvc's and svc's since the two don't
4110		 * have to be contiguous. Meaning pvc's can start
4111		 * from 1 to 10 and svc's can start from 256 to 266.
4112		 * But 256%255 is 1, i.e. CONFLICT.
4113 */
4114
4115
4116 /* Highest LCN number must be less or equal to 4096 */
4117 if ((lcn <= MAX_LCN_NUM) && (lcn > 0)){
4118
4119 if (lcn < X25_MAX_CHAN){
4120 if (card->u.x.svc_to_dev_map[lcn])
4121 return card->u.x.svc_to_dev_map[lcn];
4122
4123 if (card->u.x.pvc_to_dev_map[lcn])
4124 return card->u.x.pvc_to_dev_map[lcn];
4125
4126 }else{
4127 int new_lcn = lcn%X25_MAX_CHAN;
4128 if (card->u.x.svc_to_dev_map[new_lcn])
4129 return card->u.x.svc_to_dev_map[new_lcn];
4130
4131 if (card->u.x.pvc_to_dev_map[new_lcn])
4132 return card->u.x.pvc_to_dev_map[new_lcn];
4133 }
4134 }
4135 return NULL;
4136 }
4137}
4138
4139void bind_lcn_to_dev(sdla_t *card, struct net_device *dev, unsigned lcn)
4140{
4141 x25_channel_t *chan = dev->priv;
4142
4143 /* Modulo the lcn number by X25_MAX_CHAN (255)
4144 * because the lcn number can be greater than 255
4145 *
4146 * We need to split svc and pvc since they don't have
4147	 * to be contiguous.
4148 */
4149
4150 if (chan->common.svc){
4151 card->u.x.svc_to_dev_map[(lcn % X25_MAX_CHAN)] = dev;
4152 }else{
4153 card->u.x.pvc_to_dev_map[(lcn % X25_MAX_CHAN)] = dev;
4154 }
4155 chan->common.lcn = lcn;
4156}
4157
4158
4159
4160/*===============================================================
4161 * x25api_bh
4162 *	Bottom-half handler for API channels: drain the circular receive
4163 *	buffer and pass each queued skb to the bound socket via common.func().
4164 *==============================================================*/
4165
4166static void x25api_bh(struct net_device* dev)
4167{
4168 x25_channel_t* chan = dev->priv;
4169 sdla_t* card = chan->card;
4170 struct sk_buff *skb;
4171
4172 if (atomic_read(&chan->bh_buff_used) == 0){
4173 printk(KERN_INFO "%s: BH Buffer Empty in BH\n",
4174 card->devname);
4175 clear_bit(0, &chan->tq_working);
4176 return;
4177 }
4178
4179 while (atomic_read(&chan->bh_buff_used)){
4180
4181 /* If the sock is in the process of unlinking the
4182 * driver from the socket, we must get out.
4183		 * This never happens, but it is a sanity check. */
4184 if (test_bit(0,&chan->common.common_critical)){
4185 clear_bit(0, &chan->tq_working);
4186 return;
4187 }
4188
4189 /* If LAPB HDLC, do not drop packets if socket is
4190 * not connected. Let the buffer fill up and
4191 * turn off rx interrupt */
4192 if (card->u.x.LAPB_hdlc){
4193 if (chan->common.sk == NULL || chan->common.func == NULL){
4194 clear_bit(0, &chan->tq_working);
4195 return;
4196 }
4197 }
4198
4199 skb = ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb;
4200
4201 if (skb == NULL){
4202 printk(KERN_INFO "%s: BH Skb empty for read %i\n",
4203 card->devname,chan->bh_read);
4204 }else{
4205
4206 if (chan->common.sk == NULL || chan->common.func == NULL){
4207 printk(KERN_INFO "%s: BH: Socket disconnected, dropping\n",
4208 card->devname);
4209 dev_kfree_skb_any(skb);
4210 x25api_bh_cleanup(dev);
4211 ++chan->ifstats.rx_dropped;
4212 ++chan->rx_intr_stat.rx_intr_bfr_not_passed_to_stack;
4213 continue;
4214 }
4215
4216
4217 if (chan->common.func(skb,dev,chan->common.sk) != 0){
4218				/* Sock is full and cannot send; queue us for another
4219				 * try.
4220 */
4221 printk(KERN_INFO "%s: BH: !!! Packet failed to send !!!!! \n",
4222 card->devname);
4223 atomic_set(&chan->common.receive_block,1);
4224 return;
4225 }else{
4226 x25api_bh_cleanup(dev);
4227 ++chan->rx_intr_stat.rx_intr_bfr_passed_to_stack;
4228 }
4229 }
4230 }
4231 clear_bit(0, &chan->tq_working);
4232
4233 return;
4234}
4235
4236/*===============================================================
4237 * x25api_bh_cleanup
4238 *	Release the buffer slot just consumed by x25api_bh(): advance the
4239 *	read index and re-enable the RX interrupt once space is available.
4240 *==============================================================*/
4241
4242static int x25api_bh_cleanup(struct net_device *dev)
4243{
4244 x25_channel_t* chan = dev->priv;
4245 sdla_t *card = chan->card;
4246 TX25Status* status = card->flags;
4247
4248
4249 ((bh_data_t *)&chan->bh_head[chan->bh_read])->skb = NULL;
4250
4251 if (chan->bh_read == MAX_BH_BUFF){
4252 chan->bh_read=0;
4253 }else{
4254 ++chan->bh_read;
4255 }
4256
4257 /* If the Receive interrupt was off, it means
4258 * that we filled up our circular buffer. Check
4259 * that we have space in the buffer. If so
4260 * turn the RX interrupt back on.
4261 */
4262 if (!(status->imask & INTR_ON_RX_FRAME)){
4263 if (atomic_read(&chan->bh_buff_used) < (MAX_BH_BUFF+1)){
4264 printk(KERN_INFO "%s: BH: Turning on the interrupt\n",
4265 card->devname);
4266 status->imask |= INTR_ON_RX_FRAME;
4267 }
4268 }
4269
4270 atomic_dec(&chan->bh_buff_used);
4271 return 0;
4272}
4273
4274
4275/*===============================================================
4276 * bh_enqueue
4277 *	Queue a received skb into the channel's circular buffer; when the
4278 *	buffer becomes full, mask the RX interrupt until the BH drains it.
4279 *==============================================================*/
4280
4281static int bh_enqueue(struct net_device *dev, struct sk_buff *skb)
4282{
4283 x25_channel_t* chan = dev->priv;
4284 sdla_t *card = chan->card;
4285 TX25Status* status = card->flags;
4286
4287 if (atomic_read(&chan->bh_buff_used) == (MAX_BH_BUFF+1)){
4288 printk(KERN_INFO "%s: Bottom half buffer FULL\n",
4289 card->devname);
4290 return 1;
4291 }
4292
4293 ((bh_data_t *)&chan->bh_head[chan->bh_write])->skb = skb;
4294
4295 if (chan->bh_write == MAX_BH_BUFF){
4296 chan->bh_write=0;
4297 }else{
4298 ++chan->bh_write;
4299 }
4300
4301 atomic_inc(&chan->bh_buff_used);
4302
4303 if (atomic_read(&chan->bh_buff_used) == (MAX_BH_BUFF+1)){
4304 printk(KERN_INFO "%s: Buffer is now full, Turning off RX Intr\n",
4305 card->devname);
4306 status->imask &= ~INTR_ON_RX_FRAME;
4307 }
4308
4309 return 0;
4310}
4311
4312
4313/*===============================================================
4314 * timer_intr_cmd_exec
4315 *
4316 * Called by timer interrupt to execute a command
4317 *===============================================================*/
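/*
 * In short (illustrative summary, not additional driver code): the loop
 * below walks the interface list starting at card->u.x.cmd_dev and
 * services at most one pending API command per invocation.  It returns 1
 * if that command could not yet complete and must be retried on the next
 * timer interrupt (the caller keeps INTR_ON_TIMER enabled), and 0
 * otherwise.
 */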
4318
4319static int timer_intr_cmd_exec (sdla_t* card)
4320{
4321 struct net_device *dev;
4322 unsigned char more_to_exec=0;
4323 volatile x25_channel_t *chan=NULL;
4324 int i=0,bad_cmd=0,err=0;
4325
4326 if (card->u.x.cmd_dev == NULL){
4327 card->u.x.cmd_dev = card->wandev.dev;
4328 }
4329
4330 dev = card->u.x.cmd_dev;
4331
4332 for (;;){
4333
4334 chan = dev->priv;
4335
4336 if (atomic_read(&chan->common.command)){
4337
4338 bad_cmd = check_bad_command(card,dev);
4339
4340 if ((!chan->common.mbox || atomic_read(&chan->common.disconnect)) &&
4341 !bad_cmd){
4342
4343				/* Socket has died or exited. We must bring the
4344 * channel down before anybody else tries to
4345 * use it */
4346 err = channel_disconnect(card,dev);
4347 }else{
4348 err = execute_delayed_cmd(card, dev,
4349 (mbox_cmd_t*)chan->common.mbox,
4350 bad_cmd);
4351 }
4352
4353 switch (err){
4354
4355 case RETURN_RESULT:
4356
4357 /* Return the result to the socket without
4358 * delay. NO_WAIT Command */
4359 atomic_set(&chan->common.command,0);
4360 if (atomic_read(&card->u.x.command_busy))
4361 atomic_set(&card->u.x.command_busy,0);
4362
4363 send_delayed_cmd_result(card,dev,card->mbox);
4364
4365 more_to_exec=0;
4366 break;
4367 case DELAY_RESULT:
4368
4369 /* Wait for the remote to respond, before
4370 * sending the result up to the socket.
4371 * WAIT command */
4372 if (atomic_read(&card->u.x.command_busy))
4373 atomic_set(&card->u.x.command_busy,0);
4374
4375 atomic_set(&chan->common.command,0);
4376 more_to_exec=0;
4377 break;
4378 default:
4379
4380 /* If command could not be executed for
4381				 * some reason (i.e. return code 0x33 busy),
4382				 * set the more_to_exec bit which will
4383				 * indicate that this command must be executed
4384				 * again during the next timer interrupt
4385 */
4386 more_to_exec=1;
4387 if (atomic_read(&card->u.x.command_busy) == 0)
4388 atomic_set(&card->u.x.command_busy,1);
4389 break;
4390 }
4391
4392 bad_cmd=0;
4393
4394			/* If the flag is set, there are no hdlc buffers;
4395			 * thus, wait for the next pass and try the
4396			 * same command again. Otherwise, start searching
4397			 * from the next device on the next pass.
4398 */
4399 if (!more_to_exec){
4400 dev = move_dev_to_next(card,dev);
4401 }
4402 break;
4403 }else{
4404 /* This device has nothing to execute,
4405 * go to next.
4406 */
4407 if (atomic_read(&card->u.x.command_busy))
4408 atomic_set(&card->u.x.command_busy,0);
4409 dev = move_dev_to_next(card,dev);
4410 }
4411
4412 if (++i == card->u.x.no_dev){
4413 if (!more_to_exec){
4414 DBG_PRINTK(KERN_INFO "%s: Nothing to execute in Timer\n",
4415 card->devname);
4416 if (atomic_read(&card->u.x.command_busy)){
4417 atomic_set(&card->u.x.command_busy,0);
4418 }
4419 }
4420 break;
4421 }
4422
4423 } //End of FOR
4424
4425 card->u.x.cmd_dev = dev;
4426
4427 if (more_to_exec){
4428 /* If more commands are pending, do not turn off timer
4429 * interrupt */
4430 return 1;
4431 }else{
4432 /* No more commands, turn off timer interrupt */
4433 return 0;
4434 }
4435}
4436
4437/*===============================================================
4438 * execute_delayed_cmd
4439 *
4440 * Execute an API command which was passed down from the
4441 * sock. Sock is very limited in which commands it can
4442 * execute. Wait and No Wait commands are supported.
4443 *	Place Call, Clear Call and Reset are wait commands, whereas
4444 *	Accept Call is a no_wait command.
4445 *
4446 *===============================================================*/
4447
4448static int execute_delayed_cmd(sdla_t* card, struct net_device *dev,
4449 mbox_cmd_t *usr_cmd, char bad_cmd)
4450{
4451 TX25Mbox* mbox = card->mbox;
4452 int err;
4453 x25_channel_t *chan = dev->priv;
4454 int delay=RETURN_RESULT;
4455
4456 if (!(*card->u.x.hdlc_buf_status & 0x40) && !bad_cmd){
4457 return TRY_CMD_AGAIN;
4458 }
4459
4460 /* This way a command is guaranteed to be executed for
4461	 * the specific lcn that the network interface is bound to. */
4462 usr_cmd->cmd.lcn = chan->common.lcn;
4463
4464
4465 /* If channel is pvc, instead of place call
4466 * run x25_channel configuration. If running LAPB HDLC
4467 * enable communications.
4468 */
4469 if ((!chan->common.svc) && (usr_cmd->cmd.command == X25_PLACE_CALL)){
4470
4471 if (card->u.x.LAPB_hdlc){
4472 DBG_PRINTK(KERN_INFO "LAPB: Connecting\n");
4473 connect(card);
4474 set_chan_state(dev,WAN_CONNECTING);
4475 return DELAY_RESULT;
4476 }else{
4477 DBG_PRINTK(KERN_INFO "%s: PVC is CONNECTING\n",card->devname);
4478 if (x25_get_chan_conf(card, chan) == CMD_OK){
4479 set_chan_state(dev, WAN_CONNECTED);
4480 }else{
4481 set_chan_state(dev, WAN_DISCONNECTED);
4482 }
4483 return RETURN_RESULT;
4484 }
4485 }
4486
4487 /* Copy the socket mbox command onto the board */
4488
4489 memcpy(&mbox->cmd, &usr_cmd->cmd, sizeof(TX25Cmd));
4490 if (usr_cmd->cmd.length){
4491 memcpy(mbox->data, usr_cmd->data, usr_cmd->cmd.length);
4492 }
4493
4494 /* Check if command is bad. We need to copy the cmd into
4495	 * the buffer regardless, since we return the mbox to
4496 * the user */
4497 if (bad_cmd){
4498 mbox->cmd.result=0x01;
4499 return RETURN_RESULT;
4500 }
4501
4502 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
4503
4504 if (err != CMD_OK && err != X25RES_NOT_READY)
4505 x25_error(card, err, usr_cmd->cmd.command, usr_cmd->cmd.lcn);
4506
4507 if (mbox->cmd.result == X25RES_NOT_READY){
4508 return TRY_CMD_AGAIN;
4509 }
4510
4511 switch (mbox->cmd.command){
4512
4513 case X25_PLACE_CALL:
4514
4515 switch (mbox->cmd.result){
4516
4517 case CMD_OK:
4518
4519 /* Check if Place call is a wait command or a
4520 * no wait command */
4521 if (atomic_read(&chan->common.command) & 0x80)
4522 delay=RETURN_RESULT;
4523 else
4524 delay=DELAY_RESULT;
4525
4526
4527 DBG_PRINTK(KERN_INFO "\n%s: PLACE CALL Binding dev %s to lcn %i\n",
4528 card->devname,dev->name, mbox->cmd.lcn);
4529
4530 bind_lcn_to_dev (card, dev, mbox->cmd.lcn);
4531 set_chan_state(dev, WAN_CONNECTING);
4532 break;
4533
4534
4535 default:
4536 delay=RETURN_RESULT;
4537 set_chan_state(dev, WAN_DISCONNECTED);
4538 break;
4539 }
4540 break;
4541
4542 case X25_ACCEPT_CALL:
4543
4544 switch (mbox->cmd.result){
4545
4546 case CMD_OK:
4547
4548 DBG_PRINTK(KERN_INFO "\n%s: ACCEPT Binding dev %s to lcn %i\n",
4549 card->devname,dev->name,mbox->cmd.lcn);
4550
4551 bind_lcn_to_dev (card, dev, mbox->cmd.lcn);
4552
4553 if (x25_get_chan_conf(card, chan) == CMD_OK){
4554
4555 set_chan_state(dev, WAN_CONNECTED);
4556 delay=RETURN_RESULT;
4557
4558 }else{
4559 if (x25_clear_call(card, usr_cmd->cmd.lcn, 0, 0) == CMD_OK){
4560 /* if clear is successful, wait for clear confirm
4561 */
4562 delay=DELAY_RESULT;
4563 }else{
4564					/* Do not change the state here. If we fail
4565					 * the accept, the return code is sent up
4566					 * the stack, which will either retry
4567					 * or clear the call
4568					 */
4569 DBG_PRINTK(KERN_INFO
4570					"%s: ACCEPT: STATE MAY BE CORRUPTED 2 !!!!!\n",
4571 card->devname);
4572 delay=RETURN_RESULT;
4573 }
4574 }
4575 break;
4576
4577
4578 case X25RES_ASYNC_PACKET:
4579 delay=TRY_CMD_AGAIN;
4580 break;
4581
4582 default:
4583 DBG_PRINTK(KERN_INFO "%s: ACCEPT FAILED\n",card->devname);
4584 if (x25_clear_call(card, usr_cmd->cmd.lcn, 0, 0) == CMD_OK){
4585 delay=DELAY_RESULT;
4586 }else{
4587				/* Do not change the state here. If we fail the accept, the
4588				 * return code is sent up the stack, which will either retry
4589				 * or clear the call */
4590 DBG_PRINTK(KERN_INFO
4591 "%s: ACCEPT: STATE MAY BE CORRUPTED 1 !!!!!\n",
4592 card->devname);
4593 delay=RETURN_RESULT;
4594 }
4595 }
4596 break;
4597
4598 case X25_CLEAR_CALL:
4599
4600 switch (mbox->cmd.result){
4601
4602 case CMD_OK:
4603 DBG_PRINTK(KERN_INFO
4604 "CALL CLEAR OK: Dev %s Mbox Lcn %i Chan Lcn %i\n",
4605 dev->name,mbox->cmd.lcn,chan->common.lcn);
4606 set_chan_state(dev, WAN_DISCONNECTING);
4607 delay = DELAY_RESULT;
4608 break;
4609
4610 case X25RES_CHANNEL_IN_USE:
4611 case X25RES_ASYNC_PACKET:
4612 delay = TRY_CMD_AGAIN;
4613 break;
4614
4615 case X25RES_LINK_NOT_IN_ABM:
4616 case X25RES_INVAL_LCN:
4617 case X25RES_INVAL_STATE:
4618 set_chan_state(dev, WAN_DISCONNECTED);
4619 delay = RETURN_RESULT;
4620 break;
4621
4622 default:
4623			/* If the command did not execute because of a user
4624			 * fault, do not change the state. This will
4625			 * signal the socket that the clear command failed.
4626			 * The user can retry or close the socket.
4627			 * When the socket gets killed, it will set
4628			 * chan->disconnect, which will signal the
4629			 * driver to clear the call */
4630 printk(KERN_INFO "%s: Clear Command Failed, Rc %x\n",
4631 card->devname,mbox->cmd.command);
4632 delay = RETURN_RESULT;
4633 }
4634 break;
4635 }
4636
4637 return delay;
4638}
4639
4640/*===============================================================
4641 * api_incoming_call
4642 *
4643 * Pass an incoming call request up the listening
4644 *	sock. If the API sock is not listening, reject the
4645 * call.
4646 *
4647 *===============================================================*/
4648
4649static int api_incoming_call (sdla_t* card, TX25Mbox *mbox, int lcn)
4650{
4651 struct sk_buff *skb;
4652 int len = sizeof(TX25Cmd)+mbox->cmd.length;
4653
4654 if (alloc_and_init_skb_buf(card, &skb, len)){
4655 printk(KERN_INFO "%s: API incoming call, no memory\n",card->devname);
4656 return 1;
4657 }
4658
4659 memcpy(skb_put(skb,len),&mbox->cmd,len);
4660
4661 skb->mac.raw = skb->data;
4662 skb->protocol = htons(X25_PROT);
4663 skb->pkt_type = WAN_PACKET_ASYNC;
4664
4665 if (card->func(skb,card->sk) < 0){
4666 printk(KERN_INFO "%s: MAJOR ERROR: Failed to send up place call \n",card->devname);
4667 dev_kfree_skb_any(skb);
4668 return 1;
4669 }
4670
4671 return 0;
4672}
4673
4674/*===============================================================
4675 * send_delayed_cmd_result
4676 *
4677 *	Wait commands like PLACE CALL or CLEAR CALL must wait
4678 * until the result arrives. This function passes
4679 * the result to a waiting sock.
4680 *
4681 *===============================================================*/
4682static void send_delayed_cmd_result(sdla_t *card, struct net_device *dev,
4683 TX25Mbox* mbox)
4684{
4685 x25_channel_t *chan = dev->priv;
4686 mbox_cmd_t *usr_cmd = (mbox_cmd_t *)chan->common.mbox;
4687 struct sk_buff *skb;
4688 int len=sizeof(unsigned char);
4689
4690 atomic_set(&chan->common.command,0);
4691
4692 /* If the sock is in the process of unlinking the
4693 * driver from the socket, we must get out.
4694	 * This never happens, but is a sanity check. */
4695 if (test_bit(0,&chan->common.common_critical)){
4696 return;
4697 }
4698
4699 if (!usr_cmd || !chan->common.sk || !chan->common.func){
4700 DBG_PRINTK(KERN_INFO "Delay result: Sock not bounded sk: %u, func: %u, mbox: %u\n",
4701 (unsigned int)chan->common.sk,
4702 (unsigned int)chan->common.func,
4703 (unsigned int)usr_cmd);
4704 return;
4705 }
4706
4707 memcpy(&usr_cmd->cmd, &mbox->cmd, sizeof(TX25Cmd));
4708 if (mbox->cmd.length > 0){
4709 memcpy(usr_cmd->data, mbox->data, mbox->cmd.length);
4710 }
4711
4712 if (alloc_and_init_skb_buf(card,&skb,len)){
4713 printk(KERN_INFO "Delay result: No sock buffers\n");
4714 return;
4715 }
4716
4717 memcpy(skb_put(skb,len),&mbox->cmd.command,len);
4718
4719 skb->mac.raw = skb->data;
4720 skb->pkt_type = WAN_PACKET_CMD;
4721
4722 chan->common.func(skb,dev,chan->common.sk);
4723}
4724
4725/*===============================================================
4726 * clear_confirm_event
4727 *
4728 * Pass the clear confirmation event up the sock. The
4729 * API will disconnect only after the clear confirmation
4730 * has been received.
4731 *
4732 * Depending on the state, clear confirmation could
4733 * be an OOB event, or a result of an API command.
4734 *===============================================================*/
4735
4736static int clear_confirm_event (sdla_t *card, TX25Mbox* mb)
4737{
4738 struct net_device *dev;
4739 x25_channel_t *chan;
4740 unsigned char old_state;
4741
4742 dev = find_channel(card,mb->cmd.lcn);
4743 if (!dev){
4744 DBG_PRINTK(KERN_INFO "%s: *** GOT CLEAR BUT NO DEV %i\n",
4745 card->devname,mb->cmd.lcn);
4746 return 0;
4747 }
4748
4749 chan=dev->priv;
4750 DBG_PRINTK(KERN_INFO "%s: GOT CLEAR CONFIRM %s: Mbox lcn %i Chan lcn %i\n",
4751 card->devname, dev->name, mb->cmd.lcn, chan->common.lcn);
4752
4753 /* If not API fall through to default.
4754 * If API, send the result to a waiting
4755 * socket.
4756 */
4757
4758 old_state = chan->common.state;
4759 set_chan_state(dev, WAN_DISCONNECTED);
4760
4761 if (chan->common.usedby == API){
4762 switch (old_state) {
4763
4764 case WAN_DISCONNECTING:
4765 case WAN_CONNECTING:
4766 send_delayed_cmd_result(card,dev,mb);
4767 break;
4768 case WAN_CONNECTED:
4769 send_oob_msg(card,dev,mb);
4770 break;
4771 }
4772 return 1;
4773 }
4774
4775 return 0;
4776}
4777
4778/*===============================================================
4779 * send_oob_msg
4780 *
4781 * Construct an NEM Message and pass it up the connected
4782 *	sock. If the sock is not bound, discard the NEM.
4783 *
4784 *===============================================================*/
4785
4786static void send_oob_msg(sdla_t *card, struct net_device *dev, TX25Mbox *mbox)
4787{
4788 x25_channel_t *chan = dev->priv;
4789 mbox_cmd_t *usr_cmd = (mbox_cmd_t *)chan->common.mbox;
4790 struct sk_buff *skb;
4791 int len=sizeof(x25api_hdr_t)+mbox->cmd.length;
4792 x25api_t *api_hdr;
4793
4794 /* If the sock is in the process of unlinking the
4795 * driver from the socket, we must get out.
4796	 * This never happens, but is a sanity check. */
4797 if (test_bit(0,&chan->common.common_critical)){
4798 return;
4799 }
4800
4801 if (!usr_cmd || !chan->common.sk || !chan->common.func){
4802 DBG_PRINTK(KERN_INFO "OOB MSG: Sock not bounded\n");
4803 return;
4804 }
4805
4806 memcpy(&usr_cmd->cmd, &mbox->cmd, sizeof(TX25Cmd));
4807 if (mbox->cmd.length > 0){
4808 memcpy(usr_cmd->data, mbox->data, mbox->cmd.length);
4809 }
4810
4811 if (alloc_and_init_skb_buf(card,&skb,len)){
4812 printk(KERN_INFO "%s: OOB MSG: No sock buffers\n",card->devname);
4813 return;
4814 }
4815
4816 api_hdr = (x25api_t*)skb_put(skb,len);
4817 api_hdr->hdr.pktType = mbox->cmd.pktType & 0x7F;
4818 api_hdr->hdr.qdm = mbox->cmd.qdm;
4819 api_hdr->hdr.cause = mbox->cmd.cause;
4820 api_hdr->hdr.diagn = mbox->cmd.diagn;
4821 api_hdr->hdr.length = mbox->cmd.length;
4822 api_hdr->hdr.result = mbox->cmd.result;
4823 api_hdr->hdr.lcn = mbox->cmd.lcn;
4824
4825 if (mbox->cmd.length > 0){
4826 memcpy(api_hdr->data,mbox->data,mbox->cmd.length);
4827 }
4828
4829 skb->mac.raw = skb->data;
4830 skb->pkt_type = WAN_PACKET_ERR;
4831
4832 if (chan->common.func(skb,dev,chan->common.sk) < 0){
4833 if (bh_enqueue(dev,skb)){
4834 printk(KERN_INFO "%s: Dropping OOB MSG\n",card->devname);
4835 dev_kfree_skb_any(skb);
4836 }
4837 }
4838
4839 DBG_PRINTK(KERN_INFO "%s: OOB MSG OK, %s, lcn %i\n",
4840 card->devname, dev->name, mbox->cmd.lcn);
4841}
4842
4843/*===============================================================
4844 * alloc_and_init_skb_buf
4845 *
4846 * Allocate and initialize an skb buffer.
4847 *
4848 *===============================================================*/
4849
4850static int alloc_and_init_skb_buf (sdla_t *card, struct sk_buff **skb, int len)
4851{
4852 struct sk_buff *new_skb = *skb;
4853
4854 new_skb = dev_alloc_skb(len + X25_HRDHDR_SZ);
4855 if (new_skb == NULL){
4856 printk(KERN_INFO "%s: no socket buffers available!\n",
4857 card->devname);
4858 return 1;
4859 }
4860
4861 if (skb_tailroom(new_skb) < len){
4862 /* No room for the packet. Call off the whole thing! */
4863 dev_kfree_skb_any(new_skb);
4864 printk(KERN_INFO "%s: Listen: unexpectedly long packet sequence\n"
4865 ,card->devname);
4866 *skb = NULL;
4867 return 1;
4868 }
4869
4870 *skb = new_skb;
4871 return 0;
4872
4873}
4874
4875/*===============================================================
4876 * api_oob_event
4877 *
4878 * Send an OOB event up to the sock
4879 *
4880 *===============================================================*/
4881
4882static void api_oob_event (sdla_t *card,TX25Mbox *mbox)
4883{
4884 struct net_device *dev = find_channel(card, mbox->cmd.lcn);
4885 x25_channel_t *chan;
4886
4887 if (!dev)
4888 return;
4889
4890 chan=dev->priv;
4891
4892 if (chan->common.usedby == API)
4893 send_oob_msg(card,dev,mbox);
4894
4895}
4896
4897
4898
4899
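/*===============================================================
 *	channel_disconnect
 *
 *	Bring a channel down on behalf of a dead or exiting socket:
 *	clear the call on an SVC, or (for PVC/LAPB HDLC) drop the
 *	channel state directly and, for LAPB HDLC, take the whole
 *	link down.
 *==============================================================*/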
4900static int channel_disconnect(sdla_t* card, struct net_device *dev)
4901{
4902
4903 int err;
4904 x25_channel_t *chan = dev->priv;
4905
4906 DBG_PRINTK(KERN_INFO "%s: TIMER: %s, Device down disconnecting\n",
4907 card->devname,dev->name);
4908
4909 if (chan->common.svc){
4910 err = x25_clear_call(card,chan->common.lcn,0,0);
4911 }else{
4912 /* If channel is PVC or LAPB HDLC, there is no call
4913 * to be cleared, thus drop down to the default
4914 * area
4915 */
4916 err = 1;
4917 }
4918
4919 switch (err){
4920
4921 case X25RES_CHANNEL_IN_USE:
4922 case X25RES_NOT_READY:
4923 err = TRY_CMD_AGAIN;
4924 break;
4925 case CMD_OK:
4926 DBG_PRINTK(KERN_INFO "CALL CLEAR OK: Dev %s Chan Lcn %i\n",
4927 dev->name,chan->common.lcn);
4928
4929 set_chan_state(dev,WAN_DISCONNECTING);
4930 atomic_set(&chan->common.command,0);
4931 err = DELAY_RESULT;
4932 break;
4933 default:
4934 /* If LAPB HDLC protocol, bring the whole link down
4935 * once the application terminates
4936 */
4937
4938 set_chan_state(dev,WAN_DISCONNECTED);
4939
4940 if (card->u.x.LAPB_hdlc){
4941 DBG_PRINTK(KERN_INFO "LAPB: Disconnecting Link\n");
4942 hdlc_link_down (card);
4943 }
4944 atomic_set(&chan->common.command,0);
4945 err = RETURN_RESULT;
4946 break;
4947 }
4948
4949 return err;
4950}
4951
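/*===============================================================
 *	hdlc_link_down
 *
 *	Issue the X25_HDLC_LINK_DISC command to the adapter (with a
 *	few retries) and then disable communications.
 *==============================================================*/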
4952static void hdlc_link_down (sdla_t *card)
4953{
4954 TX25Mbox* mbox = card->mbox;
4955 int retry = 5;
4956 int err=0;
4957
4958 do {
4959 memset(mbox,0,sizeof(TX25Mbox));
4960 mbox->cmd.command = X25_HDLC_LINK_DISC;
4961 mbox->cmd.length = 1;
4962 mbox->data[0]=0;
4963 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
4964
4965 } while (err && retry-- && x25_error(card, err, X25_HDLC_LINK_DISC, 0));
4966
4967 if (err)
4968 printk(KERN_INFO "%s: Hdlc Link Down Failed %x\n",card->devname,err);
4969
4970 disconnect (card);
4971
4972}
4973
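/*===============================================================
 *	check_bad_command
 *
 *	Sanity-check a queued API command against the current channel
 *	state; returns non-zero if the command is invalid in this state.
 *==============================================================*/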
4974static int check_bad_command(sdla_t* card, struct net_device *dev)
4975{
4976 x25_channel_t *chan = dev->priv;
4977 int bad_cmd = 0;
4978
4979 switch (atomic_read(&chan->common.command)&0x7F){
4980
4981 case X25_PLACE_CALL:
4982 if (chan->common.state != WAN_DISCONNECTED)
4983 bad_cmd=1;
4984 break;
4985 case X25_CLEAR_CALL:
4986 if (chan->common.state == WAN_DISCONNECTED)
4987 bad_cmd=1;
4988 break;
4989 case X25_ACCEPT_CALL:
4990 if (chan->common.state != WAN_CONNECTING)
4991 bad_cmd=1;
4992 break;
4993 case X25_RESET:
4994 if (chan->common.state != WAN_CONNECTED)
4995 bad_cmd=1;
4996 break;
4997 default:
4998 bad_cmd=1;
4999 break;
5000 }
5001
5002 if (bad_cmd){
5003 printk(KERN_INFO "%s: Invalid State, BAD Command %x, dev %s, lcn %i, st %i\n",
5004 card->devname,atomic_read(&chan->common.command),dev->name,
5005 chan->common.lcn, chan->common.state);
5006 }
5007
5008 return bad_cmd;
5009}
5010
5011
5012
5013/*************************** XPIPEMON FUNCTIONS **************************/
5014
5015/*==============================================================================
5016 * Process UDP call of type XPIPE
5017 */
5018
5019static int process_udp_mgmt_pkt(sdla_t *card)
5020{
5021 int c_retry = MAX_CMD_RETRY;
5022 unsigned int len;
5023 struct sk_buff *new_skb;
5024 TX25Mbox *mbox = card->mbox;
5025 int err;
5026 int udp_mgmt_req_valid = 1;
5027 struct net_device *dev;
5028 x25_channel_t *chan;
5029 unsigned short lcn;
5030 struct timeval tv;
5031
5032
5033 x25_udp_pkt_t *x25_udp_pkt;
5034 x25_udp_pkt = (x25_udp_pkt_t *)card->u.x.udp_pkt_data;
5035
5036 dev = card->u.x.udp_dev;
5037 chan = dev->priv;
5038 lcn = chan->common.lcn;
5039
5040 switch(x25_udp_pkt->cblock.command) {
5041
5042 /* XPIPE_ENABLE_TRACE */
5043 case XPIPE_ENABLE_TRACING:
5044
5045 /* XPIPE_GET_TRACE_INFO */
5046 case XPIPE_GET_TRACE_INFO:
5047
5048 /* SET FT1 MODE */
5049 case XPIPE_SET_FT1_MODE:
5050
5051 if(card->u.x.udp_pkt_src == UDP_PKT_FRM_NETWORK) {
5052 ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_direction_err;
5053 udp_mgmt_req_valid = 0;
5054 break;
5055 }
5056
5057 /* XPIPE_FT1_READ_STATUS */
5058 case XPIPE_FT1_READ_STATUS:
5059
5060 /* FT1 MONITOR STATUS */
5061 case XPIPE_FT1_STATUS_CTRL:
5062 if(card->hw.fwid != SFID_X25_508) {
5063 ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_type_err;
5064 udp_mgmt_req_valid = 0;
5065 break;
5066 }
5067 default:
5068 break;
5069 }
5070
5071 if(!udp_mgmt_req_valid) {
5072 /* set length to 0 */
5073 x25_udp_pkt->cblock.length = 0;
5074 /* set return code */
5075 x25_udp_pkt->cblock.result = (card->hw.fwid != SFID_X25_508) ? 0x1F : 0xCD;
5076
5077 } else {
5078
5079 switch (x25_udp_pkt->cblock.command) {
5080
5081
5082 case XPIPE_FLUSH_DRIVER_STATS:
5083 init_x25_channel_struct(chan);
5084 init_global_statistics(card);
5085 mbox->cmd.length = 0;
5086 break;
5087
5088
5089 case XPIPE_DRIVER_STAT_IFSEND:
5090 memcpy(x25_udp_pkt->data, &chan->if_send_stat, sizeof(if_send_stat_t));
5091 mbox->cmd.length = sizeof(if_send_stat_t);
5092 x25_udp_pkt->cblock.length = mbox->cmd.length;
5093 break;
5094
5095 case XPIPE_DRIVER_STAT_INTR:
5096 memcpy(&x25_udp_pkt->data[0], &card->statistics, sizeof(global_stats_t));
5097 memcpy(&x25_udp_pkt->data[sizeof(global_stats_t)],
5098 &chan->rx_intr_stat, sizeof(rx_intr_stat_t));
5099
5100 mbox->cmd.length = sizeof(global_stats_t) +
5101 sizeof(rx_intr_stat_t);
5102 x25_udp_pkt->cblock.length = mbox->cmd.length;
5103 break;
5104
5105 case XPIPE_DRIVER_STAT_GEN:
5106 memcpy(x25_udp_pkt->data,
5107 &chan->pipe_mgmt_stat.UDP_PIPE_mgmt_kmalloc_err,
5108 sizeof(pipe_mgmt_stat_t));
5109
5110 memcpy(&x25_udp_pkt->data[sizeof(pipe_mgmt_stat_t)],
5111 &card->statistics, sizeof(global_stats_t));
5112
5113 x25_udp_pkt->cblock.result = 0;
5114 x25_udp_pkt->cblock.length = sizeof(global_stats_t)+
5115 sizeof(rx_intr_stat_t);
5116 mbox->cmd.length = x25_udp_pkt->cblock.length;
5117 break;
5118
5119 case XPIPE_ROUTER_UP_TIME:
5120 do_gettimeofday(&tv);
5121 chan->router_up_time = tv.tv_sec - chan->router_start_time;
5122 *(unsigned long *)&x25_udp_pkt->data = chan->router_up_time;
5123 x25_udp_pkt->cblock.length = mbox->cmd.length = 4;
5124 x25_udp_pkt->cblock.result = 0;
5125 break;
5126
5127 default :
5128
5129 do {
5130 memcpy(&mbox->cmd, &x25_udp_pkt->cblock.command, sizeof(TX25Cmd));
5131 if(mbox->cmd.length){
5132 memcpy(&mbox->data,
5133 (char *)x25_udp_pkt->data,
5134 mbox->cmd.length);
5135 }
5136
5137 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
5138 } while (err && c_retry-- && x25_error(card, err, mbox->cmd.command, 0));
5139
5140
5141 if ( err == CMD_OK ||
5142 (err == 1 &&
5143 (mbox->cmd.command == 0x06 ||
5144 mbox->cmd.command == 0x16) ) ){
5145
5146 ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_cmnd_OK;
5147 } else {
5148 ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_cmnd_timeout;
5149 }
5150
5151 /* copy the result back to our buffer */
5152 memcpy(&x25_udp_pkt->cblock.command, &mbox->cmd, sizeof(TX25Cmd));
5153
5154 if(mbox->cmd.length) {
5155 memcpy(&x25_udp_pkt->data, &mbox->data, mbox->cmd.length);
5156 }
5157 break;
5158
5159 } //switch
5160
5161 }
5162
5163 /* Fill UDP TTL */
5164
5165 x25_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
5166 len = reply_udp(card->u.x.udp_pkt_data, mbox->cmd.length);
5167
5168
5169 if(card->u.x.udp_pkt_src == UDP_PKT_FRM_NETWORK) {
5170
5171 err = x25_send(card, lcn, 0, len, card->u.x.udp_pkt_data);
5172 if (!err)
5173 ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_send_passed;
5174 else
5175 ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_adptr_send_failed;
5176
5177 } else {
5178
5179 /* Allocate socket buffer */
5180 if((new_skb = dev_alloc_skb(len)) != NULL) {
5181 void *buf;
5182
5183 /* copy data into new_skb */
5184 buf = skb_put(new_skb, len);
5185 memcpy(buf, card->u.x.udp_pkt_data, len);
5186
5187 /* Decapsulate packet and pass it up the protocol
5188 stack */
5189 new_skb->dev = dev;
5190
5191 if (chan->common.usedby == API)
5192 new_skb->protocol = htons(X25_PROT);
5193 else
5194 new_skb->protocol = htons(ETH_P_IP);
5195
5196 new_skb->mac.raw = new_skb->data;
5197
5198 netif_rx(new_skb);
5199 ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_passed_to_stack;
5200
5201 } else {
5202 ++chan->pipe_mgmt_stat.UDP_PIPE_mgmt_no_socket;
5203 printk(KERN_INFO
5204 "%s: UDP mgmt cmnd, no socket buffers available!\n",
5205 card->devname);
5206 }
5207 }
5208
5209 card->u.x.udp_pkt_lgth = 0;
5210
5211 return 1;
5212}
5213
5214
5215/*==============================================================================
5216 * Determine what type of UDP call it is. DRVSTATS or XPIPE8ND ?
5217 */
5218static int udp_pkt_type( struct sk_buff *skb, sdla_t* card )
5219{
5220 x25_udp_pkt_t *x25_udp_pkt = (x25_udp_pkt_t *)skb->data;
5221
5222 if((x25_udp_pkt->ip_pkt.protocol == UDPMGMT_UDP_PROTOCOL) &&
5223 (x25_udp_pkt->ip_pkt.ver_inet_hdr_length == 0x45) &&
5224 (x25_udp_pkt->udp_pkt.udp_dst_port == ntohs(card->wandev.udp_port)) &&
5225 (x25_udp_pkt->wp_mgmt.request_reply == UDPMGMT_REQUEST)) {
5226
5227 if(!strncmp(x25_udp_pkt->wp_mgmt.signature,
5228 UDPMGMT_XPIPE_SIGNATURE, 8)){
5229 return UDP_XPIPE_TYPE;
5230 }else{
5231 printk(KERN_INFO "%s: UDP Packet, Failed Signature !\n",
5232 card->devname);
5233 }
5234 }
5235
5236 return UDP_INVALID_TYPE;
5237}
5238
5239
5240/*============================================================================
5241 * Reply to UDP Management system.
5242 * Return the total length of the reply packet.
5243 */
5244static int reply_udp( unsigned char *data, unsigned int mbox_len )
5245{
5246 unsigned short len, udp_length, temp, ip_length;
5247 unsigned long ip_temp;
5248 int even_bound = 0;
5249
5250
5251 x25_udp_pkt_t *x25_udp_pkt = (x25_udp_pkt_t *)data;
5252
5253 /* Set length of packet */
5254 len = sizeof(ip_pkt_t)+
5255 sizeof(udp_pkt_t)+
5256 sizeof(wp_mgmt_t)+
5257 sizeof(cblock_t)+
5258 mbox_len;
5259
5260
5261 /* fill in UDP reply */
5262 x25_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
5263
5264 /* fill in UDP length */
5265 udp_length = sizeof(udp_pkt_t)+
5266 sizeof(wp_mgmt_t)+
5267 sizeof(cblock_t)+
5268 mbox_len;
5269
5270
5271 /* put it on an even boundary */
5272 if ( udp_length & 0x0001 ) {
5273 udp_length += 1;
5274 len += 1;
5275 even_bound = 1;
5276 }
5277
5278 temp = (udp_length<<8)|(udp_length>>8);
5279 x25_udp_pkt->udp_pkt.udp_length = temp;
5280
5281 /* swap UDP ports */
5282 temp = x25_udp_pkt->udp_pkt.udp_src_port;
5283 x25_udp_pkt->udp_pkt.udp_src_port =
5284 x25_udp_pkt->udp_pkt.udp_dst_port;
5285 x25_udp_pkt->udp_pkt.udp_dst_port = temp;
5286
5287
5288
5289 /* add UDP pseudo header */
5290 temp = 0x1100;
5291 *((unsigned short *)
5292 (x25_udp_pkt->data+mbox_len+even_bound)) = temp;
5293 temp = (udp_length<<8)|(udp_length>>8);
5294 *((unsigned short *)
5295 (x25_udp_pkt->data+mbox_len+even_bound+2)) = temp;
5296
5297 /* calculate UDP checksum */
5298 x25_udp_pkt->udp_pkt.udp_checksum = 0;
5299
5300 x25_udp_pkt->udp_pkt.udp_checksum =
5301 calc_checksum(&data[UDP_OFFSET], udp_length+UDP_OFFSET);
5302
5303 /* fill in IP length */
5304 ip_length = len;
5305 temp = (ip_length<<8)|(ip_length>>8);
5306 x25_udp_pkt->ip_pkt.total_length = temp;
5307
5308 /* swap IP addresses */
5309 ip_temp = x25_udp_pkt->ip_pkt.ip_src_address;
5310 x25_udp_pkt->ip_pkt.ip_src_address =
5311 x25_udp_pkt->ip_pkt.ip_dst_address;
5312 x25_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
5313
5314
5315 /* fill in IP checksum */
5316 x25_udp_pkt->ip_pkt.hdr_checksum = 0;
5317 x25_udp_pkt->ip_pkt.hdr_checksum = calc_checksum(data, sizeof(ip_pkt_t));
5318
5319 return len;
5320} /* reply_udp */
5321
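/*============================================================================
 * Compute the 16-bit one's complement checksum (RFC 1071 style) used for
 * the IP and UDP headers of the XPIPE management reply.
 */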
5322unsigned short calc_checksum (char *data, int len)
5323{
5324 unsigned short temp;
5325 unsigned long sum=0;
5326 int i;
5327
5328 for( i = 0; i <len; i+=2 ) {
5329 memcpy(&temp,&data[i],2);
5330 sum += (unsigned long)temp;
5331 }
5332
5333 while (sum >> 16 ) {
5334 sum = (sum & 0xffffUL) + (sum >> 16);
5335 }
5336
5337 temp = (unsigned short)sum;
5338 temp = ~temp;
5339
5340 if( temp == 0 )
5341 temp = 0xffff;
5342
5343 return temp;
5344}
5345
5346/*=============================================================================
5347 * Store a UDP management packet for later processing.
5348 */
5349
5350static int store_udp_mgmt_pkt(int udp_type, char udp_pkt_src, sdla_t* card,
5351 struct net_device *dev, struct sk_buff *skb,
5352 int lcn)
5353{
5354 int udp_pkt_stored = 0;
5355
5356 if(!card->u.x.udp_pkt_lgth && (skb->len <= MAX_LGTH_UDP_MGNT_PKT)){
5357 card->u.x.udp_pkt_lgth = skb->len;
5358 card->u.x.udp_type = udp_type;
5359 card->u.x.udp_pkt_src = udp_pkt_src;
5360 card->u.x.udp_lcn = lcn;
5361 card->u.x.udp_dev = dev;
5362 memcpy(card->u.x.udp_pkt_data, skb->data, skb->len);
5363 card->u.x.timer_int_enabled |= TMR_INT_ENABLED_UDP_PKT;
5364 udp_pkt_stored = 1;
5365
5366 }else{
5367 printk(KERN_INFO "%s: ERROR: UDP packet not stored for LCN %d\n",
5368 card->devname,lcn);
5369 }
5370
5371 if(udp_pkt_src == UDP_PKT_FRM_STACK){
5372 dev_kfree_skb_any(skb);
5373 }else{
5374 dev_kfree_skb_any(skb);
5375 }
5376
5377 return(udp_pkt_stored);
5378}
5379
5380
5381
5382/*=============================================================================
5383 * Initialize the x25 channel statistics structures.
5384 */
5385static void init_x25_channel_struct( x25_channel_t *chan )
5386{
5387 memset(&chan->if_send_stat.if_send_entry,0,sizeof(if_send_stat_t));
5388 memset(&chan->rx_intr_stat.rx_intr_no_socket,0,sizeof(rx_intr_stat_t));
5389 memset(&chan->pipe_mgmt_stat.UDP_PIPE_mgmt_kmalloc_err,0,sizeof(pipe_mgmt_stat_t));
5390}
5391
5392/*============================================================================
5393 * Initialize Global Statistics
5394 */
5395static void init_global_statistics( sdla_t *card )
5396{
5397 memset(&card->statistics.isr_entry,0,sizeof(global_stats_t));
5398}
5399
5400
5401/*===============================================================
5402 * SMP Support
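 *
 *	S508_S514_lock()/S508_S514_unlock() wrap the per-card spinlock
 *	with IRQ save/restore, so shared card data can be protected from
 *	both process and interrupt context.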
5403 * ==============================================================*/
5404
5405static void S508_S514_lock(sdla_t *card, unsigned long *smp_flags)
5406{
5407 spin_lock_irqsave(&card->wandev.lock, *smp_flags);
5408}
5409static void S508_S514_unlock(sdla_t *card, unsigned long *smp_flags)
5410{
5411 spin_unlock_irqrestore(&card->wandev.lock, *smp_flags);
5412}
5413
5414/*===============================================================
5415 * x25_timer_routine
5416 *
5417 *	A more efficient polling routine. Every half second
5418 *	queue a polling task. We want to do the polling in a
5419 * 	task, not a timer, because the timer runs in interrupt time.
5420 *
5421 *	FIXME: Polling should be rethought.
5422 *==============================================================*/
5423
5424static void x25_timer_routine(unsigned long data)
5425{
5426 sdla_t *card = (sdla_t*)data;
5427
5428 if (!card->wandev.dev){
5429 printk(KERN_INFO "%s: Stopping the X25 Poll Timer: No Dev.\n",
5430 card->devname);
5431 return;
5432 }
5433
5434 if (card->open_cnt != card->u.x.num_of_ch){
5435 printk(KERN_INFO "%s: Stopping the X25 Poll Timer: Interface down.\n",
5436 card->devname);
5437 return;
5438 }
5439
5440 if (test_bit(PERI_CRIT,&card->wandev.critical)){
5441 printk(KERN_INFO "%s: Stopping the X25 Poll Timer: Shutting down.\n",
5442 card->devname);
5443 return;
5444 }
5445
5446 if (!test_and_set_bit(POLL_CRIT,&card->wandev.critical)){
5447 trigger_x25_poll(card);
5448 }
5449
5450 card->u.x.x25_timer.expires=jiffies+(HZ>>1);
5451 add_timer(&card->u.x.x25_timer);
5452 return;
5453}
5454
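/*===============================================================
 *	disable_comm_shutdown
 *
 *	Quiesce the adapter on shutdown: mask all interrupts, close
 *	the HDLC link and drop DTR.
 *==============================================================*/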
5455void disable_comm_shutdown(sdla_t *card)
5456{
5457 TX25Mbox* mbox = card->mbox;
5458 int err;
5459
5460	/* Turn off interrupts */
5461 mbox->data[0] = 0;
5462 if (card->hw.fwid == SFID_X25_508){
5463 mbox->data[1] = card->hw.irq;
5464 mbox->data[2] = 2;
5465 mbox->cmd.length = 3;
5466 }else {
5467 mbox->cmd.length = 1;
5468 }
5469 mbox->cmd.command = X25_SET_INTERRUPT_MODE;
5470 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
5471 if (err)
5472		printk(KERN_INFO "INTERRUPT OFF FAILED %x\n",err);
5473
5474 /* Bring down HDLC */
5475 mbox->cmd.command = X25_HDLC_LINK_CLOSE;
5476 mbox->cmd.length = 0;
5477 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
5478 if (err)
5479		printk(KERN_INFO "LINK CLOSE FAILED %x\n",err);
5480
5481
5482	/* Bring down DTR */
5483 mbox->data[0] = 0;
5484 mbox->data[2] = 0;
5485 mbox->data[1] = 0x01;
5486 mbox->cmd.length = 3;
5487 mbox->cmd.command = X25_SET_GLOBAL_VARS;
5488 err = sdla_exec(mbox) ? mbox->cmd.result : CMD_TIMEOUT;
5489 if (err)
5490 printk(KERN_INFO "DTR DOWN FAILED %x\n",err);
5491
5492}
5493
5494MODULE_LICENSE("GPL");
5495
5496/****** End *****************************************************************/
diff --git a/drivers/net/wan/sdladrv.c b/drivers/net/wan/sdladrv.c
new file mode 100644
index 000000000000..c8bc6da57a41
--- /dev/null
+++ b/drivers/net/wan/sdladrv.c
@@ -0,0 +1,2318 @@
1/*****************************************************************************
2* sdladrv.c SDLA Support Module. Main module.
3*
4* This module is a library of common hardware-specific functions
5* used by all Sangoma drivers.
6*
7* Author: Gideon Hack
8*
9* Copyright: (c) 1995-2000 Sangoma Technologies Inc.
10*
11* This program is free software; you can redistribute it and/or
12* modify it under the terms of the GNU General Public License
13* as published by the Free Software Foundation; either version
14* 2 of the License, or (at your option) any later version.
15* ============================================================================
16* Mar 20, 2001  Nenad Corbic	Added the auto_pci_cfg field, to support
17* the PCISLOT #0.
18* Apr 04, 2000 Nenad Corbic Fixed the auto memory detection code.
19* The memory test at address 0xC8000.
20* Mar 09, 2000 Nenad Corbic Added Gideon's Bug Fix: clear pci
21* interrupt flags on initial load.
22* Jun 02, 1999 Gideon Hack Added support for the S514 adapter.
23* Updates for Linux 2.2.X kernels.
24* Sep 17, 1998 Jaspreet Singh Updates for linux 2.2.X kernels
25* Dec 20, 1996 Gene Kozin Version 3.0.0. Complete overhaul.
26* Jul 12, 1996 Gene Kozin Changes for Linux 2.0 compatibility.
27* Jun 12, 1996 Gene Kozin Added support for S503 card.
28* Apr 30, 1996 Gene Kozin SDLA hardware interrupt is acknowledged before
29* 				calling protocol-specific ISR.
30* Register I/O ports with Linux kernel.
31* Miscellaneous bug fixes.
32* Dec 20, 1995 Gene Kozin Fixed a bug in interrupt routine.
33* Oct 14, 1995 Gene Kozin Initial version.
34*****************************************************************************/
35
36/*****************************************************************************
37 * Notes:
38 * ------
39 * 1. This code is meant to be system-independent (as much as possible). To
40 *    achieve this, various macros are used to hide system-specific interfaces.
41 * To compile this code, one of the following constants must be defined:
42 *
43 * Platform Define
44 * -------- ------
45 * Linux _LINUX_
46 * SCO Unix _SCO_UNIX_
47 *
48 * 2. Supported adapter types:
49 *
50 * S502A
51 * ES502A (S502E)
52 * S503
53 * S507
54 * S508 (S509)
55 *
56 * 3. S502A Notes:
57 *
58 * There is no separate DPM window enable/disable control in S502A. It
59 *	opens immediately after a window number is written to the HMCR
60 * register. To close the window, HMCR has to be written a value
61 * ????1111b (e.g. 0x0F or 0xFF).
62 *
63 * S502A DPM window cannot be located at offset E000 (e.g. 0xAE000).
64 *
65 * There should be a delay of ??? before reading back S502A status
66 * register.
67 *
68 * 4. S502E Notes:
69 *
70 * S502E has a h/w bug: although default IRQ line state is HIGH, enabling
71 * interrupts by setting bit 1 of the control register (BASE) to '1'
72 * causes it to go LOW! Therefore, disabling interrupts by setting that
73 * bit to '0' causes low-to-high transition on IRQ line (ghosty
74 * interrupt). The same occurs when disabling CPU by resetting bit 0 of
75 * CPU control register (BASE+3) - see the next note.
76 *
77 * S502E CPU and DPM control is limited:
78 *
79 *	o CPU cannot be stopped independently. Resetting bit 0 of the CPU
80 * control register (BASE+3) shuts the board down entirely, including
81 * DPM;
82 *
83 *	o DPM access cannot be controlled dynamically. Once CPU is started,
84 * bit 1 of the control register (BASE) is used to enable/disable IRQ,
85 * so that access to shared memory cannot be disabled while CPU is
86 * running.
87 ****************************************************************************/
88
89#define _LINUX_
90
91#if defined(_LINUX_) /****** Linux *******************************/
92
93#include <linux/config.h>
94#include <linux/kernel.h> /* printk(), and other useful stuff */
95#include <linux/stddef.h> /* offsetof(), etc. */
96#include <linux/errno.h> /* return codes */
97#include <linux/string.h> /* inline memset(), etc. */
98#include <linux/module.h> /* support for loadable modules */
99#include <linux/jiffies.h> /* for jiffies, HZ, etc. */
100#include <linux/sdladrv.h> /* API definitions */
101#include <linux/sdlasfm.h> /* SDLA firmware module definitions */
102#include <linux/sdlapci.h> /* SDLA PCI hardware definitions */
103#include <linux/pci.h> /* PCI defines and function prototypes */
104#include <asm/io.h> /* for inb(), outb(), etc. */
105
106#define _INB(port) (inb(port))
107#define _OUTB(port, byte) (outb((byte),(port)))
108#define SYSTEM_TICK jiffies
109
110#include <linux/init.h>
111
112
113#elif defined(_SCO_UNIX_) /****** SCO Unix ****************************/
114
115#if !defined(INKERNEL)
116#error This code MUST be compiled in kernel mode!
117#endif
118#include <sys/sdladrv.h> /* API definitions */
119#include <sys/sdlasfm.h> /* SDLA firmware module definitions */
120#include <sys/inline.h> /* for inb(), outb(), etc. */
121#define _INB(port) (inb(port))
122#define _OUTB(port, byte) (outb((port),(byte)))
123#define SYSTEM_TICK lbolt
124
125#else
126#error Unknown system type!
127#endif
128
129#define MOD_VERSION 3
130#define MOD_RELEASE 0
131
132#define SDLA_IODELAY 100 /* I/O Rd/Wr delay, 10 works for 486DX2-66 */
133#define EXEC_DELAY 20 /* shared memory access delay, mks */
134#define EXEC_TIMEOUT (HZ*2) /* command timeout, in ticks */
135
136/* I/O port address range */
137#define S502A_IORANGE 3
138#define S502E_IORANGE 4
139#define S503_IORANGE 3
140#define S507_IORANGE 4
141#define S508_IORANGE 4
142
143/* Maximum amount of memory */
144#define S502_MAXMEM 0x10000L
145#define S503_MAXMEM 0x10000L
146#define S507_MAXMEM 0x40000L
147#define S508_MAXMEM 0x40000L
148
149/* Minimum amount of memory */
150#define S502_MINMEM 0x8000L
151#define S503_MINMEM 0x8000L
152#define S507_MINMEM 0x20000L
153#define S508_MINMEM 0x20000L
154#define NO_PORT -1
155
156
157
158
159
160/****** Function Prototypes *************************************************/
161
162/* Hardware-specific functions */
163static int sdla_detect (sdlahw_t* hw);
164static int sdla_autodpm (sdlahw_t* hw);
165static int sdla_setdpm (sdlahw_t* hw);
166static int sdla_load (sdlahw_t* hw, sfm_t* sfm, unsigned len);
167static int sdla_init (sdlahw_t* hw);
168static unsigned long sdla_memtest (sdlahw_t* hw);
169static int sdla_bootcfg (sdlahw_t* hw, sfm_info_t* sfminfo);
170static unsigned char make_config_byte (sdlahw_t* hw);
171static int sdla_start (sdlahw_t* hw, unsigned addr);
172
173static int init_s502a (sdlahw_t* hw);
174static int init_s502e (sdlahw_t* hw);
175static int init_s503 (sdlahw_t* hw);
176static int init_s507 (sdlahw_t* hw);
177static int init_s508 (sdlahw_t* hw);
178
179static int detect_s502a (int port);
180static int detect_s502e (int port);
181static int detect_s503 (int port);
182static int detect_s507 (int port);
183static int detect_s508 (int port);
184static int detect_s514 (sdlahw_t* hw);
185static int find_s514_adapter(sdlahw_t* hw, char find_first_S514_card);
186
187/* Miscellaneous functions */
188static void peek_by_4 (unsigned long src, void* buf, unsigned len);
189static void poke_by_4 (unsigned long dest, void* buf, unsigned len);
190static int calibrate_delay (int mks);
191static int get_option_index (unsigned* optlist, unsigned optval);
192static unsigned check_memregion (void* ptr, unsigned len);
193static unsigned test_memregion (void* ptr, unsigned len);
194static unsigned short checksum (unsigned char* buf, unsigned len);
195static int init_pci_slot(sdlahw_t *);
196
197static int pci_probe(sdlahw_t *hw);
198
199/****** Global Data **********************************************************
200 * Note: All data must be explicitly initialized!!!
201 */
202
203static struct pci_device_id sdladrv_pci_tbl[] = {
204 { V3_VENDOR_ID, V3_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
205 { } /* Terminating entry */
206};
207MODULE_DEVICE_TABLE(pci, sdladrv_pci_tbl);
208
209MODULE_LICENSE("GPL");
210
211/* private data */
212static char modname[] = "sdladrv";
213static char fullname[] = "SDLA Support Module";
214static char copyright[] = "(c) 1995-1999 Sangoma Technologies Inc.";
215static unsigned exec_idle;
216
217/* Hardware configuration options.
218 * These are arrays of configuration options used by verification routines.
219 * The first element of each array is its size (i.e. number of options).
220 */
221static unsigned s502_port_options[] =
222 { 4, 0x250, 0x300, 0x350, 0x360 }
223;
224static unsigned s503_port_options[] =
225 { 8, 0x250, 0x254, 0x300, 0x304, 0x350, 0x354, 0x360, 0x364 }
226;
227static unsigned s508_port_options[] =
228 { 8, 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390 }
229;
230
231static unsigned s502a_irq_options[] = { 0 };
232static unsigned s502e_irq_options[] = { 4, 2, 3, 5, 7 };
233static unsigned s503_irq_options[] = { 5, 2, 3, 4, 5, 7 };
234static unsigned s508_irq_options[] = { 8, 3, 4, 5, 7, 10, 11, 12, 15 };
235
236static unsigned s502a_dpmbase_options[] =
237{
238 28,
239 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000,
240 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000,
241 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000,
242 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000,
243};
244static unsigned s507_dpmbase_options[] =
245{
246 32,
247 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
248 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000,
249 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
250 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000,
251};
252static unsigned s508_dpmbase_options[] = /* incl. S502E and S503 */
253{
254 32,
255 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
256 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
257 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000,
258 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000,
259};
260
261/*
262static unsigned s502_dpmsize_options[] = { 2, 0x2000, 0x10000 };
263static unsigned s507_dpmsize_options[] = { 2, 0x2000, 0x4000 };
264static unsigned s508_dpmsize_options[] = { 1, 0x2000 };
265*/
266
267static unsigned s502a_pclk_options[] = { 2, 3600, 7200 };
268static unsigned s502e_pclk_options[] = { 5, 3600, 5000, 7200, 8000, 10000 };
269static unsigned s503_pclk_options[] = { 3, 7200, 8000, 10000 };
270static unsigned s507_pclk_options[] = { 1, 12288 };
271static unsigned s508_pclk_options[] = { 1, 16000 };
272
273/* Host memory control register masks */
274static unsigned char s502a_hmcr[] =
275{
276 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, /* A0000 - AC000 */
277 0x20, 0x22, 0x24, 0x26, 0x28, 0x2A, 0x2C, /* C0000 - CC000 */
278 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, /* D0000 - DC000 */
279 0x30, 0x32, 0x34, 0x36, 0x38, 0x3A, 0x3C, /* E0000 - EC000 */
280};
281static unsigned char s502e_hmcr[] =
282{
283 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E, /* A0000 - AE000 */
284 0x20, 0x22, 0x24, 0x26, 0x28, 0x2A, 0x2C, 0x2E, /* C0000 - CE000 */
285 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, /* D0000 - DE000 */
286 0x30, 0x32, 0x34, 0x36, 0x38, 0x3A, 0x3C, 0x3E, /* E0000 - EE000 */
287};
288static unsigned char s507_hmcr[] =
289{
290 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, /* A0000 - AE000 */
291 0x40, 0x42, 0x44, 0x46, 0x48, 0x4A, 0x4C, 0x4E, /* B0000 - BE000 */
292 0x80, 0x82, 0x84, 0x86, 0x88, 0x8A, 0x8C, 0x8E, /* C0000 - CE000 */
293 0xC0, 0xC2, 0xC4, 0xC6, 0xC8, 0xCA, 0xCC, 0xCE, /* E0000 - EE000 */
294};
295static unsigned char s508_hmcr[] =
296{
297 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* A0000 - AE000 */
298 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* C0000 - CE000 */
299 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, /* D0000 - DE000 */
300 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, /* E0000 - EE000 */
301};
302
303static unsigned char s507_irqmask[] =
304{
305 0x00, 0x20, 0x40, 0x60, 0x80, 0xA0, 0xC0, 0xE0
306};
307
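/* Per-slot card initialization flags for S514 PCI adapters;
 * all entries are reset to 0xFF at module load time. */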
308static int pci_slot_ar[MAX_S514_CARDS];
309
310/******* Kernel Loadable Module Entry Points ********************************/
311
312/*============================================================================
313 * Module 'insert' entry point.
314 * o print announcement
315 * o initialize static data
316 * o calibrate SDLA shared memory access delay.
317 *
318 * Return: 0 Ok
319 * < 0 error.
320 * Context: process
321 */
322
323static int __init sdladrv_init(void)
324{
325 int i=0;
326
327 printk(KERN_INFO "%s v%u.%u %s\n",
328 fullname, MOD_VERSION, MOD_RELEASE, copyright);
329 exec_idle = calibrate_delay(EXEC_DELAY);
330#ifdef WANDEBUG
331 printk(KERN_DEBUG "%s: exec_idle = %d\n", modname, exec_idle);
332#endif
333
334 /* Initialize the PCI Card array, which
335 * will store flags, used to mark
336 * card initialization state */
337 for (i=0; i<MAX_S514_CARDS; i++)
338 pci_slot_ar[i] = 0xFF;
339
340 return 0;
341}
342
343/*============================================================================
344 * Module 'remove' entry point.
345 * o release all remaining system resources
346 */
347static void __exit sdladrv_cleanup(void)
348{
349}
350
351module_init(sdladrv_init);
352module_exit(sdladrv_cleanup);
353
354/******* Kernel APIs ********************************************************/
355
356/*============================================================================
357 * Set up adapter.
358 * o detect adapter type
359 * o verify hardware configuration options
360 * o check for hardware conflicts
361 * o set up adapter shared memory
362 * o test adapter memory
363 * o load firmware
364 * Return: 0 ok.
365 * < 0 error
366 */
367
368EXPORT_SYMBOL(sdla_setup);
369
370int sdla_setup (sdlahw_t* hw, void* sfm, unsigned len)
371{
372 unsigned* irq_opt = NULL; /* IRQ options */
373 unsigned* dpmbase_opt = NULL; /* DPM window base options */
374 unsigned* pclk_opt = NULL; /* CPU clock rate options */
375 int err=0;
376
377 if (sdla_detect(hw)) {
378 if(hw->type != SDLA_S514)
379 printk(KERN_INFO "%s: no SDLA card found at port 0x%X\n",
380 modname, hw->port);
381 return -EINVAL;
382 }
383
384 if(hw->type != SDLA_S514) {
385 printk(KERN_INFO "%s: found S%04u card at port 0x%X.\n",
386 modname, hw->type, hw->port);
387
388 hw->dpmsize = SDLA_WINDOWSIZE;
389 switch (hw->type) {
390 case SDLA_S502A:
391 hw->io_range = S502A_IORANGE;
392 irq_opt = s502a_irq_options;
393 dpmbase_opt = s502a_dpmbase_options;
394 pclk_opt = s502a_pclk_options;
395 break;
396
397 case SDLA_S502E:
398 hw->io_range = S502E_IORANGE;
399 irq_opt = s502e_irq_options;
400 dpmbase_opt = s508_dpmbase_options;
401 pclk_opt = s502e_pclk_options;
402 break;
403
404 case SDLA_S503:
405 hw->io_range = S503_IORANGE;
406 irq_opt = s503_irq_options;
407 dpmbase_opt = s508_dpmbase_options;
408 pclk_opt = s503_pclk_options;
409 break;
410
411 case SDLA_S507:
412 hw->io_range = S507_IORANGE;
413 irq_opt = s508_irq_options;
414 dpmbase_opt = s507_dpmbase_options;
415 pclk_opt = s507_pclk_options;
416 break;
417
418 case SDLA_S508:
419 hw->io_range = S508_IORANGE;
420 irq_opt = s508_irq_options;
421 dpmbase_opt = s508_dpmbase_options;
422 pclk_opt = s508_pclk_options;
423 break;
424 }
425
426 /* Verify IRQ configuration options */
427 if (!get_option_index(irq_opt, hw->irq)) {
428 printk(KERN_INFO "%s: IRQ %d is invalid!\n",
429 modname, hw->irq);
430 return -EINVAL;
431 }
432
433 /* Verify CPU clock rate configuration options */
434 if (hw->pclk == 0)
435 hw->pclk = pclk_opt[1]; /* use default */
436
437 else if (!get_option_index(pclk_opt, hw->pclk)) {
438 printk(KERN_INFO "%s: CPU clock %u is invalid!\n",
439 modname, hw->pclk);
440 return -EINVAL;
441 }
442 printk(KERN_INFO "%s: assuming CPU clock rate of %u kHz.\n",
443 modname, hw->pclk);
444
445 /* Setup adapter dual-port memory window and test memory */
446 if (hw->dpmbase == 0) {
447 err = sdla_autodpm(hw);
448 if (err) {
449 printk(KERN_INFO
450 "%s: can't find available memory region!\n",
451 modname);
452 return err;
453 }
454 }
455 else if (!get_option_index(dpmbase_opt,
456 virt_to_phys(hw->dpmbase))) {
457 printk(KERN_INFO
458 "%s: memory address 0x%lX is invalid!\n",
459 modname, virt_to_phys(hw->dpmbase));
460 return -EINVAL;
461 }
462 else if (sdla_setdpm(hw)) {
463 printk(KERN_INFO
464 "%s: 8K memory region at 0x%lX is not available!\n",
465 modname, virt_to_phys(hw->dpmbase));
466 return -EINVAL;
467 }
468 printk(KERN_INFO
469 "%s: dual-port memory window is set at 0x%lX.\n",
470 modname, virt_to_phys(hw->dpmbase));
471
472
473			/* If we find memory in the 0xE**** memory region,
474			 * warn the user to disable the SHADOW RAM,
475			 * since memory corruption can occur if SHADOW is
476			 * enabled. This can cause random crashes ! */
477 if (virt_to_phys(hw->dpmbase) >= 0xE0000){
478 printk(KERN_WARNING "\n%s: !!!!!!!! WARNING !!!!!!!!\n",modname);
479 printk(KERN_WARNING "%s: WANPIPE is using 0x%lX memory region !!!\n",
480 modname, virt_to_phys(hw->dpmbase));
481 printk(KERN_WARNING " Please disable the SHADOW RAM, otherwise\n");
482 printk(KERN_WARNING " your system might crash randomly from time to time !\n");
483 printk(KERN_WARNING "%s: !!!!!!!! WARNING !!!!!!!!\n\n",modname);
484 }
485 }
486
487 else {
488 hw->memory = test_memregion((void*)hw->dpmbase,
489 MAX_SIZEOF_S514_MEMORY);
490 if(hw->memory < (256 * 1024)) {
491 printk(KERN_INFO
492 "%s: error in testing S514 memory (0x%lX)\n",
493 modname, hw->memory);
494 sdla_down(hw);
495 return -EINVAL;
496 }
497 }
498
499 printk(KERN_INFO "%s: found %luK bytes of on-board memory\n",
500 modname, hw->memory / 1024);
501
502 /* Load firmware. If loader fails then shut down adapter */
503 err = sdla_load(hw, sfm, len);
504 if (err) sdla_down(hw); /* shutdown adapter */
505
506 return err;
507}
508
509/*============================================================================
510 * Shut down SDLA: disable shared memory access and interrupts, stop CPU, etc.
511 */
512
513EXPORT_SYMBOL(sdla_down);
514
515int sdla_down (sdlahw_t* hw)
516{
517 unsigned port = hw->port;
518 int i;
519 unsigned char CPU_no;
520 u32 int_config, int_status;
521
522 if(!port && (hw->type != SDLA_S514))
523 return -EFAULT;
524
525 switch (hw->type) {
526 case SDLA_S502A:
527 _OUTB(port, 0x08); /* halt CPU */
528 _OUTB(port, 0x08);
529 _OUTB(port, 0x08);
530 hw->regs[0] = 0x08;
531 _OUTB(port + 1, 0xFF); /* close memory window */
532 hw->regs[1] = 0xFF;
533 break;
534
535 case SDLA_S502E:
536 _OUTB(port + 3, 0); /* stop CPU */
537 _OUTB(port, 0); /* reset board */
538 for (i = 0; i < S502E_IORANGE; ++i)
539 hw->regs[i] = 0
540 ;
541 break;
542
543 case SDLA_S503:
544 case SDLA_S507:
545 case SDLA_S508:
546 _OUTB(port, 0); /* reset board logic */
547 hw->regs[0] = 0;
548 break;
549
550 case SDLA_S514:
551 /* halt the adapter */
552 *(char *)hw->vector = S514_CPU_HALT;
553 CPU_no = hw->S514_cpu_no[0];
554
555 /* disable the PCI IRQ and disable memory access */
556 pci_read_config_dword(hw->pci_dev, PCI_INT_CONFIG, &int_config);
557 int_config &= (CPU_no == S514_CPU_A) ? ~PCI_DISABLE_IRQ_CPU_A : ~PCI_DISABLE_IRQ_CPU_B;
558 pci_write_config_dword(hw->pci_dev, PCI_INT_CONFIG, int_config);
559 read_S514_int_stat(hw, &int_status);
560 S514_intack(hw, int_status);
561 if(CPU_no == S514_CPU_A)
562 pci_write_config_dword(hw->pci_dev, PCI_MAP0_DWORD,
563 PCI_CPU_A_MEM_DISABLE);
564 else
565 pci_write_config_dword(hw->pci_dev, PCI_MAP1_DWORD,
566 PCI_CPU_B_MEM_DISABLE);
567
568 /* free up the allocated virtual memory */
569 iounmap((void *)hw->dpmbase);
570 iounmap((void *)hw->vector);
571 break;
572
573
574 default:
575 return -EINVAL;
576 }
577 return 0;
578}
579
580/*============================================================================
581 * Map shared memory window into SDLA address space.
582 */
583
584EXPORT_SYMBOL(sdla_mapmem);
585
586int sdla_mapmem (sdlahw_t* hw, unsigned long addr)
587{
588 unsigned port = hw->port;
589 register int tmp;
590
591 switch (hw->type) {
592 case SDLA_S502A:
593 case SDLA_S502E:
594 if (addr < S502_MAXMEM) { /* verify parameter */
595 tmp = addr >> 13; /* convert to register mask */
596 _OUTB(port + 2, tmp);
597 hw->regs[2] = tmp;
598 }
599 else return -EINVAL;
600 break;
601
602 case SDLA_S503:
603 if (addr < S503_MAXMEM) { /* verify parameter */
604 tmp = (hw->regs[0] & 0x8F) | ((addr >> 9) & 0x70);
605 _OUTB(port, tmp);
606 hw->regs[0] = tmp;
607 }
608 else return -EINVAL;
609 break;
610
611 case SDLA_S507:
612 if (addr < S507_MAXMEM) {
613 if (!(_INB(port) & 0x02))
614 return -EIO;
615 tmp = addr >> 13; /* convert to register mask */
616 _OUTB(port + 2, tmp);
617 hw->regs[2] = tmp;
618 }
619 else return -EINVAL;
620 break;
621
622 case SDLA_S508:
623 if (addr < S508_MAXMEM) {
624 tmp = addr >> 13; /* convert to register mask */
625 _OUTB(port + 2, tmp);
626 hw->regs[2] = tmp;
627 }
628 else return -EINVAL;
629 break;
630
631 case SDLA_S514:
632 return 0;
633
634 default:
635 return -EINVAL;
636 }
637 hw->vector = addr & 0xFFFFE000L;
638 return 0;
639}
640
641/*============================================================================
642 * Enable interrupt generation.
643 */
644
645EXPORT_SYMBOL(sdla_inten);
646
647int sdla_inten (sdlahw_t* hw)
648{
649 unsigned port = hw->port;
650 int tmp, i;
651
652 switch (hw->type) {
653 case SDLA_S502E:
654		/* Note that interrupt control operations on S502E are allowed
655 * only if CPU is enabled (bit 0 of status register is set).
656 */
657 if (_INB(port) & 0x01) {
658 _OUTB(port, 0x02); /* bit1 = 1, bit2 = 0 */
659 _OUTB(port, 0x06); /* bit1 = 1, bit2 = 1 */
660 hw->regs[0] = 0x06;
661 }
662 else return -EIO;
663 break;
664
665 case SDLA_S503:
666 tmp = hw->regs[0] | 0x04;
667 _OUTB(port, tmp);
668 hw->regs[0] = tmp; /* update mirror */
669 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
670 if (!(_INB(port) & 0x02)) /* verify */
671 return -EIO;
672 break;
673
674 case SDLA_S508:
675 tmp = hw->regs[0] | 0x10;
676 _OUTB(port, tmp);
677 hw->regs[0] = tmp; /* update mirror */
678 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
679 if (!(_INB(port + 1) & 0x10)) /* verify */
680 return -EIO;
681 break;
682
683 case SDLA_S502A:
684 case SDLA_S507:
685 break;
686
687 case SDLA_S514:
688 break;
689
690 default:
691 return -EINVAL;
692
693 }
694 return 0;
695}
696
697/*============================================================================
698 * Disable interrupt generation.
699 */
700
701EXPORT_SYMBOL(sdla_intde);
702
703int sdla_intde (sdlahw_t* hw)
704{
705 unsigned port = hw->port;
706 int tmp, i;
707
708 switch (hw->type) {
709 case SDLA_S502E:
710 /* Notes:
711 * 1) interrupt control operations are allowed only if CPU is
712 * enabled (bit 0 of status register is set).
713 * 2) disabling interrupts using bit 1 of control register
714 * causes IRQ line go high, therefore we are going to use
715 * 0x04 instead: lower it to inhibit interrupts to PC.
716 */
717 if (_INB(port) & 0x01) {
718 _OUTB(port, hw->regs[0] & ~0x04);
719 hw->regs[0] &= ~0x04;
720 }
721 else return -EIO;
722 break;
723
724 case SDLA_S503:
725 tmp = hw->regs[0] & ~0x04;
726 _OUTB(port, tmp);
727 hw->regs[0] = tmp; /* update mirror */
728 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
729 if (_INB(port) & 0x02) /* verify */
730 return -EIO;
731 break;
732
733 case SDLA_S508:
734 tmp = hw->regs[0] & ~0x10;
735 _OUTB(port, tmp);
736 hw->regs[0] = tmp; /* update mirror */
737 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
738 if (_INB(port) & 0x10) /* verify */
739 return -EIO;
740 break;
741
742 case SDLA_S502A:
743 case SDLA_S507:
744 break;
745
746 default:
747 return -EINVAL;
748 }
749 return 0;
750}
751
752/*============================================================================
753 * Acknowledge SDLA hardware interrupt.
754 */
755
756EXPORT_SYMBOL(sdla_intack);
757
758int sdla_intack (sdlahw_t* hw)
759{
760 unsigned port = hw->port;
761 int tmp;
762
763 switch (hw->type) {
764 case SDLA_S502E:
765		/* To acknowledge hardware interrupt we have to toggle bit 3 of
766 * control register: \_/
767 * Note that interrupt control operations on S502E are allowed
768 * only if CPU is enabled (bit 1 of status register is set).
769 */
770 if (_INB(port) & 0x01) {
771 tmp = hw->regs[0] & ~0x04;
772 _OUTB(port, tmp);
773 tmp |= 0x04;
774 _OUTB(port, tmp);
775 hw->regs[0] = tmp;
776 }
777 else return -EIO;
778 break;
779
780 case SDLA_S503:
781 if (_INB(port) & 0x04) {
782 tmp = hw->regs[0] & ~0x08;
783 _OUTB(port, tmp);
784 tmp |= 0x08;
785 _OUTB(port, tmp);
786 hw->regs[0] = tmp;
787 }
788 break;
789
790 case SDLA_S502A:
791 case SDLA_S507:
792 case SDLA_S508:
793 break;
794
795 default:
796 return -EINVAL;
797 }
798 return 0;
799}
800
801
802/*============================================================================
803 * Acknowledge S514 hardware interrupt.
804 */
805
806EXPORT_SYMBOL(S514_intack);
807
808void S514_intack (sdlahw_t* hw, u32 int_status)
809{
810 pci_write_config_dword(hw->pci_dev, PCI_INT_STATUS, int_status);
811}
812
813
814/*============================================================================
815 * Read the S514 hardware interrupt status.
816 */
817
818EXPORT_SYMBOL(read_S514_int_stat);
819
820void read_S514_int_stat (sdlahw_t* hw, u32* int_status)
821{
822 pci_read_config_dword(hw->pci_dev, PCI_INT_STATUS, int_status);
823}
824
825
826/*============================================================================
827 * Generate an interrupt to adapter's CPU.
828 */
829
830EXPORT_SYMBOL(sdla_intr);
831
832int sdla_intr (sdlahw_t* hw)
833{
834 unsigned port = hw->port;
835
836 switch (hw->type) {
837 case SDLA_S502A:
838 if (!(_INB(port) & 0x40)) {
839 _OUTB(port, 0x10); /* issue NMI to CPU */
840 hw->regs[0] = 0x10;
841 }
842 else return -EIO;
843 break;
844
845 case SDLA_S507:
846 if ((_INB(port) & 0x06) == 0x06) {
847 _OUTB(port + 3, 0);
848 }
849 else return -EIO;
850 break;
851
852 case SDLA_S508:
853 if (_INB(port + 1) & 0x02) {
854 _OUTB(port, 0x08);
855 }
856 else return -EIO;
857 break;
858
859 case SDLA_S502E:
860 case SDLA_S503:
861 default:
862 return -EINVAL;
863 }
864 return 0;
865}
866
867/*============================================================================
868 * Execute Adapter Command.
869 * o Set exec flag.
870 * o Busy-wait until flag is reset.
871 * o Return number of loops made, or 0 if command timed out.
872 */
873
874EXPORT_SYMBOL(sdla_exec);
875
876int sdla_exec (void* opflag)
877{
878 volatile unsigned char* flag = opflag;
879 unsigned long tstop;
880 int nloops;
881
882 if(readb(flag) != 0x00) {
883 printk(KERN_INFO
884 "WANPIPE: opp flag set on entry to sdla_exec\n");
885 return 0;
886 }
887
888 writeb(0x01, flag);
889
890 tstop = SYSTEM_TICK + EXEC_TIMEOUT;
891
892 for (nloops = 1; (readb(flag) == 0x01); ++ nloops) {
893 unsigned delay = exec_idle;
894 while (-- delay); /* delay */
895 if (SYSTEM_TICK > tstop) return 0; /* time is up! */
896 }
897 return nloops;
898}
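/*
 * Illustrative sketch (editorial addition, not part of the original driver):
 * one way a protocol module might issue a firmware command through
 * sdla_exec().  The mailbox layout below is hypothetical -- the real layouts
 * are defined by the protocol headers -- and 'mb' is assumed to point into
 * the adapter's dual-port memory window.
 */
struct example_mailbox {
	unsigned char	opp_flag;	/* command-pending flag polled by sdla_exec() */
	unsigned char	command;	/* firmware opcode */
	unsigned short	buffer_length;	/* length of data[] */
	unsigned char	data[128];	/* command-specific data */
};

static int example_command (struct example_mailbox* mb, unsigned char cmd)
{
	mb->command = cmd;
	mb->buffer_length = 0;

	/* sdla_exec() sets the opp flag and busy-waits until the firmware
	 * clears it; a return value of 0 means the command timed out.
	 */
	if (!sdla_exec(&mb->opp_flag))
		return -EIO;

	return 0;
}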
899
900/*============================================================================
901 * Read absolute adapter memory.
902 * Transfer data from adapter's memory to data buffer.
903 *
904 * Note:
905 * Care should be taken when crossing dual-port memory window boundary.
906 * This function is not atomic, so the caller must disable interrupts if
907 * interrupt routines are accessing adapter shared memory.
908 */
909
910EXPORT_SYMBOL(sdla_peek);
911
912int sdla_peek (sdlahw_t* hw, unsigned long addr, void* buf, unsigned len)
913{
914
915 if (addr + len > hw->memory) /* verify arguments */
916 return -EINVAL;
917
918 if(hw->type == SDLA_S514) { /* copy data for the S514 adapter */
919 peek_by_4 ((unsigned long)hw->dpmbase + addr, buf, len);
920 return 0;
921 }
922
923 else { /* copy data for the S508 adapter */
924 unsigned long oldvec = hw->vector;
925 unsigned winsize = hw->dpmsize;
926 unsigned curpos, curlen; /* current offset and block size */
927 unsigned long curvec; /* current DPM window vector */
928 int err = 0;
929
930 while (len && !err) {
931 curpos = addr % winsize; /* current window offset */
932 curvec = addr - curpos; /* current window vector */
933 curlen = (len > (winsize - curpos)) ?
934 (winsize - curpos) : len;
935 /* Relocate window and copy block of data */
936 err = sdla_mapmem(hw, curvec);
937 peek_by_4 ((unsigned long)hw->dpmbase + curpos, buf,
938 curlen);
939 addr += curlen;
940 buf = (char*)buf + curlen;
941 len -= curlen;
942 }
943
944 /* Restore DPM window position */
945 sdla_mapmem(hw, oldvec);
946 return err;
947 }
948}
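/* Worked example for the S508 path above (window size 0x2000 bytes):
 * reading len = 0x1000 bytes starting at adapter address 0x1F00 is split
 * into two chunks: 0x100 bytes at offset 0x1F00 of window vector 0x0000,
 * then the remaining 0xF00 bytes at offset 0 of window vector 0x2000.
 */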
949
950
951/*============================================================================
952 * Read data from adapter's memory to a data buffer in 4-byte chunks.
953 * Note that we ensure that the SDLA memory address is on a 4-byte boundary
954 * before we begin moving the data in 4-byte chunks.
955*/
956
957static void peek_by_4 (unsigned long src, void* buf, unsigned len)
958{
959
960 /* byte copy data until we get to a 4-byte boundary */
961 while (len && (src & 0x03)) {
962 *(char *)buf ++ = readb(src ++);
963 len --;
964 }
965
966 /* copy data in 4-byte chunks */
967 while (len >= 4) {
968 *(unsigned long *)buf = readl(src);
969 buf += 4;
970 src += 4;
971 len -= 4;
972 }
973
974 /* byte copy any remaining data */
975 while (len) {
976 *(char *)buf ++ = readb(src ++);
977 len --;
978 }
979}
980
981
982/*============================================================================
983 * Write Absolute Adapter Memory.
984 * Transfer data from data buffer to adapter's memory.
985 *
986 * Note:
987 * Care should be taken when crossing dual-port memory window boundary.
988 * This function is not atomic, so the caller must disable interrupts if
989 * interrupt routines are accessing adapter shared memory.
990 */
991
992EXPORT_SYMBOL(sdla_poke);
993
994int sdla_poke (sdlahw_t* hw, unsigned long addr, void* buf, unsigned len)
995{
996
997 if (addr + len > hw->memory) /* verify arguments */
998 return -EINVAL;
999
1000 if(hw->type == SDLA_S514) { /* copy data for the S514 adapter */
1001 poke_by_4 ((unsigned long)hw->dpmbase + addr, buf, len);
1002 return 0;
1003 }
1004
1005 else { /* copy data for the S508 adapter */
1006 unsigned long oldvec = hw->vector;
1007 unsigned winsize = hw->dpmsize;
1008 unsigned curpos, curlen; /* current offset and block size */
1009 unsigned long curvec; /* current DPM window vector */
1010 int err = 0;
1011
1012 while (len && !err) {
1013 curpos = addr % winsize; /* current window offset */
1014 curvec = addr - curpos; /* current window vector */
1015 curlen = (len > (winsize - curpos)) ?
1016 (winsize - curpos) : len;
1017 /* Relocate window and copy block of data */
1018 sdla_mapmem(hw, curvec);
1019 poke_by_4 ((unsigned long)hw->dpmbase + curpos, buf,
1020 curlen);
1021 addr += curlen;
1022 buf = (char*)buf + curlen;
1023 len -= curlen;
1024 }
1025
1026 /* Restore DPM window position */
1027 sdla_mapmem(hw, oldvec);
1028 return err;
1029 }
1030}
1031
1032
1033/*============================================================================
1034 * Write from a data buffer to adapter's memory in 4-byte chunks.
1035 * Note that we ensure that the SDLA memory address is on a 4-byte boundary
1036 * before we begin moving the data in 4-byte chunks.
1037*/
1038
1039static void poke_by_4 (unsigned long dest, void* buf, unsigned len)
1040{
1041
1042 /* byte copy data until we get to a 4-byte boundary */
1043 while (len && (dest & 0x03)) {
1044 writeb (*(char *)buf ++, dest ++);
1045 len --;
1046 }
1047
1048 /* copy data in 4-byte chunks */
1049 while (len >= 4) {
1050 writel (*(unsigned long *)buf, dest);
1051 dest += 4;
1052 buf += 4;
1053 len -= 4;
1054 }
1055
1056 /* byte copy any remaining data */
1057 while (len) {
1058 writeb (*(char *)buf ++ , dest ++);
1059 len --;
1060 }
1061}
1062
1063
1064#ifdef DONT_COMPIPLE_THIS
1065#endif /* DONT_COMPIPLE_THIS */
1066
1067/****** Hardware-Specific Functions *****************************************/
1068
1069/*============================================================================
1070 * Detect adapter type.
1071 * o if adapter type is specified then call detection routine for that adapter
1072 * type. Otherwise, call the detection routines for every adapter type until
1073 * adapter is detected.
1074 *
1075 * Notes:
1076 * 1) Detection tests are destructive! Adapter will be left in shutdown state
1077 * after the test.
1078 */
1079static int sdla_detect (sdlahw_t* hw)
1080{
1081 unsigned port = hw->port;
1082 int err = 0;
1083
1084 if (!port && (hw->type != SDLA_S514))
1085 return -EFAULT;
1086
1087 switch (hw->type) {
1088 case SDLA_S502A:
1089 if (!detect_s502a(port)) err = -ENODEV;
1090 break;
1091
1092 case SDLA_S502E:
1093 if (!detect_s502e(port)) err = -ENODEV;
1094 break;
1095
1096 case SDLA_S503:
1097 if (!detect_s503(port)) err = -ENODEV;
1098 break;
1099
1100 case SDLA_S507:
1101 if (!detect_s507(port)) err = -ENODEV;
1102 break;
1103
1104 case SDLA_S508:
1105 if (!detect_s508(port)) err = -ENODEV;
1106 break;
1107
1108 case SDLA_S514:
1109 if (!detect_s514(hw)) err = -ENODEV;
1110 break;
1111
1112 default:
1113 if (detect_s502a(port))
1114 hw->type = SDLA_S502A;
1115 else if (detect_s502e(port))
1116 hw->type = SDLA_S502E;
1117 else if (detect_s503(port))
1118 hw->type = SDLA_S503;
1119 else if (detect_s507(port))
1120 hw->type = SDLA_S507;
1121 else if (detect_s508(port))
1122 hw->type = SDLA_S508;
1123 else err = -ENODEV;
1124 }
1125 return err;
1126}
1127
1128/*============================================================================
1129 * Autoselect memory region.
1130 * o try all available DPM address options from the top down until success.
1131 */
1132static int sdla_autodpm (sdlahw_t* hw)
1133{
1134 int i, err = -EINVAL;
1135 unsigned* opt;
1136
1137 switch (hw->type) {
1138 case SDLA_S502A:
1139 opt = s502a_dpmbase_options;
1140 break;
1141
1142 case SDLA_S502E:
1143 case SDLA_S503:
1144 case SDLA_S508:
1145 opt = s508_dpmbase_options;
1146 break;
1147
1148 case SDLA_S507:
1149 opt = s507_dpmbase_options;
1150 break;
1151
1152 default:
1153 return -EINVAL;
1154 }
1155
1156 /* Start testing from 8th position, address
1157 * 0xC8000 from the 508 address table.
1158 * We don't want to test A**** addresses, since
1159 * they are usually used for Video */
1160 for (i = 8; i <= opt[0] && err; i++) {
1161 hw->dpmbase = phys_to_virt(opt[i]);
1162 err = sdla_setdpm(hw);
1163 }
1164 return err;
1165}
1166
1167/*============================================================================
1168 * Set up adapter dual-port memory window.
1169 * o shut down adapter
1170 * o make sure that no physical memory exists in this region, i.e. the entire
1171 * region reads 0xFF and is not writable when adapter is shut down.
1172 * o initialize adapter hardware
1173 * o make sure that region is usable with SDLA card, i.e. we can write to it
1174 * when adapter is configured.
1175 */
1176static int sdla_setdpm (sdlahw_t* hw)
1177{
1178 int err;
1179
1180 /* Shut down card and verify memory region */
1181 sdla_down(hw);
1182 if (check_memregion(hw->dpmbase, hw->dpmsize))
1183 return -EINVAL;
1184
1185 /* Initialize adapter and test on-board memory segment by segment.
1186 * If memory size appears to be less than shared memory window size,
1187 * assume that memory region is unusable.
1188 */
1189 err = sdla_init(hw);
1190 if (err) return err;
1191
1192 if (sdla_memtest(hw) < hw->dpmsize) { /* less than window size */
1193 sdla_down(hw);
1194 return -EIO;
1195 }
1196 sdla_mapmem(hw, 0L); /* set window vector at bottom */
1197 return 0;
1198}
1199
1200/*============================================================================
1201 * Load adapter from the memory image of the SDLA firmware module.
1202 * o verify firmware integrity and compatibility
1203 * o start adapter up
1204 */
1205static int sdla_load (sdlahw_t* hw, sfm_t* sfm, unsigned len)
1206{
1207
1208 int i;
1209
1210 /* Verify firmware signature */
1211 if (strcmp(sfm->signature, SFM_SIGNATURE)) {
1212 printk(KERN_INFO "%s: not SDLA firmware!\n",
1213 modname);
1214 return -EINVAL;
1215 }
1216
1217 /* Verify firmware module format version */
1218 if (sfm->version != SFM_VERSION) {
1219 printk(KERN_INFO
1220 "%s: firmware format %u rejected! Expecting %u.\n",
1221 modname, sfm->version, SFM_VERSION);
1222 return -EINVAL;
1223 }
1224
1225 /* Verify firmware module length and checksum */
1226 if ((len - offsetof(sfm_t, image) != sfm->info.codesize) ||
1227 (checksum((void*)&sfm->info,
1228 sizeof(sfm_info_t) + sfm->info.codesize) != sfm->checksum)) {
1229 printk(KERN_INFO "%s: firmware corrupted!\n", modname);
1230 return -EINVAL;
1231 }
1232
1233 /* Announce */
1234 printk(KERN_INFO "%s: loading %s (ID=%u)...\n", modname,
1235 (sfm->descr[0] != '\0') ? sfm->descr : "unknown firmware",
1236 sfm->info.codeid);
1237
1238 if(hw->type == SDLA_S514)
1239 printk(KERN_INFO "%s: loading S514 adapter, CPU %c\n",
1240 modname, hw->S514_cpu_no[0]);
1241
1242 /* Scan through the list of compatible adapters and make sure our
1243 * adapter type is listed.
1244 */
1245 for (i = 0;
1246 (i < SFM_MAX_SDLA) && (sfm->info.adapter[i] != hw->type);
1247 ++i);
1248
1249 if (i == SFM_MAX_SDLA) {
1250 printk(KERN_INFO "%s: firmware is not compatible with S%u!\n",
1251 modname, hw->type);
1252 return -EINVAL;
1253 }
1254
1255
1256 /* Make sure there is enough on-board memory */
1257 if (hw->memory < sfm->info.memsize) {
1258 printk(KERN_INFO
1259 "%s: firmware needs %lu bytes of on-board memory!\n",
1260 modname, sfm->info.memsize);
1261 return -EINVAL;
1262 }
1263
1264 /* Move code onto adapter */
1265 if (sdla_poke(hw, sfm->info.codeoffs, sfm->image, sfm->info.codesize)) {
1266 printk(KERN_INFO "%s: failed to load code segment!\n",
1267 modname);
1268 return -EIO;
1269 }
1270
1271 /* Prepare boot-time configuration data and kick-off CPU */
1272 sdla_bootcfg(hw, &sfm->info);
1273 if (sdla_start(hw, sfm->info.startoffs)) {
1274 printk(KERN_INFO "%s: Damn... Adapter won't start!\n",
1275 modname);
1276 return -EIO;
1277 }
1278
1279 /* position DPM window over the mailbox and enable interrupts */
1280 if (sdla_mapmem(hw, sfm->info.winoffs) || sdla_inten(hw)) {
1281 printk(KERN_INFO "%s: adapter hardware failure!\n",
1282 modname);
1283 return -EIO;
1284 }
1285 hw->fwid = sfm->info.codeid; /* set firmware ID */
1286 return 0;
1287}
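/* Load sequence summary: the firmware module (sfm_t) is accepted only if its
 * signature, format version, length and checksum all check out and the
 * adapter type appears in sfm->info.adapter[]; the code segment is then
 * copied to sfm->info.codeoffs with sdla_poke(), the boot-time configuration
 * is written, the CPU is started at sfm->info.startoffs, and finally the DPM
 * window is positioned over the mailbox at sfm->info.winoffs and interrupts
 * are enabled.
 */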
1288
1289/*============================================================================
1290 * Initialize SDLA hardware: setup memory window, IRQ, etc.
1291 */
1292static int sdla_init (sdlahw_t* hw)
1293{
1294 int i;
1295
1296 for (i = 0; i < SDLA_MAXIORANGE; ++i)
1297 hw->regs[i] = 0;
1298
1299 switch (hw->type) {
1300 case SDLA_S502A: return init_s502a(hw);
1301 case SDLA_S502E: return init_s502e(hw);
1302 case SDLA_S503: return init_s503(hw);
1303 case SDLA_S507: return init_s507(hw);
1304 case SDLA_S508: return init_s508(hw);
1305 }
1306 return -EINVAL;
1307}
1308
1309/*============================================================================
1310 * Test adapter on-board memory.
1311 * o slide DPM window from the bottom up and test adapter memory segment by
1312 * segment.
1313 * Return adapter memory size.
1314 */
1315static unsigned long sdla_memtest (sdlahw_t* hw)
1316{
1317 unsigned long memsize;
1318 unsigned winsize;
1319
1320 for (memsize = 0, winsize = hw->dpmsize;
1321 !sdla_mapmem(hw, memsize) &&
1322 (test_memregion(hw->dpmbase, winsize) == winsize)
1323 ;
1324 memsize += winsize)
1325 ;
1326 hw->memory = memsize;
1327 return memsize;
1328}
1329
1330/*============================================================================
1331 * Prepare boot-time firmware configuration data.
1332 * o position DPM window
1333 * o initialize configuration data area
1334 */
1335static int sdla_bootcfg (sdlahw_t* hw, sfm_info_t* sfminfo)
1336{
1337 unsigned char* data;
1338
1339 if (!sfminfo->datasize) return 0; /* nothing to do */
1340
1341 if (sdla_mapmem(hw, sfminfo->dataoffs) != 0)
1342 return -EIO;
1343
1344 if(hw->type == SDLA_S514)
1345 data = (void*)(hw->dpmbase + sfminfo->dataoffs);
1346 else
1347 data = (void*)((u8 *)hw->dpmbase +
1348 (sfminfo->dataoffs - hw->vector));
1349
1350 memset_io (data, 0, sfminfo->datasize);
1351
1352 writeb (make_config_byte(hw), &data[0x00]);
1353
1354 switch (sfminfo->codeid) {
1355 case SFID_X25_502:
1356 case SFID_X25_508:
1357 writeb (3, &data[0x01]); /* T1 timer */
1358 writeb (10, &data[0x03]); /* N2 */
1359 writeb (7, &data[0x06]); /* HDLC window size */
1360 writeb (1, &data[0x0B]); /* DTE */
1361 writeb (2, &data[0x0C]); /* X.25 packet window size */
1362 writew (128, &data[0x0D]); /* default X.25 data size */
1363 writew (128, &data[0x0F]); /* maximum X.25 data size */
1364 break;
1365 }
1366 return 0;
1367}
1368
1369/*============================================================================
1370 * Prepare configuration byte identifying adapter type and CPU clock rate.
1371 */
1372static unsigned char make_config_byte (sdlahw_t* hw)
1373{
1374 unsigned char byte = 0;
1375
1376 switch (hw->pclk) {
1377 case 5000: byte = 0x01; break;
1378 case 7200: byte = 0x02; break;
1379 case 8000: byte = 0x03; break;
1380 case 10000: byte = 0x04; break;
1381 case 16000: byte = 0x05; break;
1382 }
1383
1384 switch (hw->type) {
1385 case SDLA_S502E: byte |= 0x80; break;
1386 case SDLA_S503: byte |= 0x40; break;
1387 }
1388 return byte;
1389}
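/* Worked example: an S503 clocked at 10 MHz (pclk == 10000) yields
 * 0x04 | 0x40 == 0x44, while an S502E at 8 MHz yields 0x03 | 0x80 == 0x83.
 * Adapter types without a dedicated bit (e.g. the S508) encode only the
 * clock rate.
 */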
1390
1391/*============================================================================
1392 * Start adapter's CPU.
1393 * o calculate a pointer to adapter's cold boot entry point
1394 * o position DPM window
1395 * o place boot instruction (jp addr) at cold boot entry point
1396 * o start CPU
1397 */
1398static int sdla_start (sdlahw_t* hw, unsigned addr)
1399{
1400 unsigned port = hw->port;
1401 unsigned char *bootp;
1402 int err, tmp, i;
1403
1404 if (!port && (hw->type != SDLA_S514)) return -EFAULT;
1405
1406 switch (hw->type) {
1407 case SDLA_S502A:
1408 bootp = hw->dpmbase;
1409 bootp += 0x66;
1410 break;
1411
1412 case SDLA_S502E:
1413 case SDLA_S503:
1414 case SDLA_S507:
1415 case SDLA_S508:
1416 case SDLA_S514:
1417 bootp = hw->dpmbase;
1418 break;
1419
1420 default:
1421 return -EINVAL;
1422 }
1423
1424 err = sdla_mapmem(hw, 0);
1425 if (err) return err;
1426
1427 writeb (0xC3, bootp); /* Z80: 'jp' opcode */
1428 bootp ++;
1429 writew (addr, bootp);
1430
1431 switch (hw->type) {
1432 case SDLA_S502A:
1433 _OUTB(port, 0x10); /* issue NMI to CPU */
1434 hw->regs[0] = 0x10;
1435 break;
1436
1437 case SDLA_S502E:
1438 _OUTB(port + 3, 0x01); /* start CPU */
1439 hw->regs[3] = 0x01;
1440 for (i = 0; i < SDLA_IODELAY; ++i);
1441 if (_INB(port) & 0x01) { /* verify */
1442 /*
1443 * Enabling CPU changes functionality of the
1444 * control register, so we have to reset its
1445 * mirror.
1446 */
1447 _OUTB(port, 0); /* disable interrupts */
1448 hw->regs[0] = 0;
1449 }
1450 else return -EIO;
1451 break;
1452
1453 case SDLA_S503:
1454 tmp = hw->regs[0] | 0x09; /* set bits 0 and 3 */
1455 _OUTB(port, tmp);
1456 hw->regs[0] = tmp; /* update mirror */
1457 for (i = 0; i < SDLA_IODELAY; ++i);
1458 if (!(_INB(port) & 0x01)) /* verify */
1459 return -EIO;
1460 break;
1461
1462 case SDLA_S507:
1463 tmp = hw->regs[0] | 0x02;
1464 _OUTB(port, tmp);
1465 hw->regs[0] = tmp; /* update mirror */
1466 for (i = 0; i < SDLA_IODELAY; ++i);
1467 if (!(_INB(port) & 0x04)) /* verify */
1468 return -EIO;
1469 break;
1470
1471 case SDLA_S508:
1472 tmp = hw->regs[0] | 0x02;
1473 _OUTB(port, tmp);
1474 hw->regs[0] = tmp; /* update mirror */
1475 for (i = 0; i < SDLA_IODELAY; ++i);
1476 if (!(_INB(port + 1) & 0x02)) /* verify */
1477 return -EIO;
1478 break;
1479
1480 case SDLA_S514:
1481 writeb (S514_CPU_START, hw->vector);
1482 break;
1483
1484 default:
1485 return -EINVAL;
1486 }
1487 return 0;
1488}
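/* Worked example of the boot stub written above: for addr == 0x0100 the
 * three bytes placed at the cold-boot entry point are C3 00 01, i.e. a
 * Z80 'jp 0100h' (opcode 0xC3 followed by the little-endian address).
 */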
1489
1490/*============================================================================
1491 * Initialize S502A adapter.
1492 */
1493static int init_s502a (sdlahw_t* hw)
1494{
1495 unsigned port = hw->port;
1496 int tmp, i;
1497
1498 if (!detect_s502a(port))
1499 return -ENODEV;
1500
1501 hw->regs[0] = 0x08;
1502 hw->regs[1] = 0xFF;
1503
1504 /* Verify configuration options */
1505 i = get_option_index(s502a_dpmbase_options, virt_to_phys(hw->dpmbase));
1506 if (i == 0)
1507 return -EINVAL;
1508
1509 tmp = s502a_hmcr[i - 1];
1510 switch (hw->dpmsize) {
1511 case 0x2000:
1512 tmp |= 0x01;
1513 break;
1514
1515 case 0x10000L:
1516 break;
1517
1518 default:
1519 return -EINVAL;
1520 }
1521
1522 /* Setup dual-port memory window (this also enables memory access) */
1523 _OUTB(port + 1, tmp);
1524 hw->regs[1] = tmp;
1525 hw->regs[0] = 0x08;
1526 return 0;
1527}
1528
1529/*============================================================================
1530 * Initialize S502E adapter.
1531 */
1532static int init_s502e (sdlahw_t* hw)
1533{
1534 unsigned port = hw->port;
1535 int tmp, i;
1536
1537 if (!detect_s502e(port))
1538 return -ENODEV;
1539
1540 /* Verify configuration options */
1541 i = get_option_index(s508_dpmbase_options, virt_to_phys(hw->dpmbase));
1542 if (i == 0)
1543 return -EINVAL;
1544
1545 tmp = s502e_hmcr[i - 1];
1546 switch (hw->dpmsize) {
1547 case 0x2000:
1548 tmp |= 0x01;
1549 break;
1550
1551 case 0x10000L:
1552 break;
1553
1554 default:
1555 return -EINVAL;
1556 }
1557
1558 /* Setup dual-port memory window */
1559 _OUTB(port + 1, tmp);
1560 hw->regs[1] = tmp;
1561
1562 /* Enable memory access */
1563 _OUTB(port, 0x02);
1564 hw->regs[0] = 0x02;
1565 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1566 return (_INB(port) & 0x02) ? 0 : -EIO;
1567}
1568
1569/*============================================================================
1570 * Initialize S503 adapter.
1571 * ---------------------------------------------------------------------------
1572 */
1573static int init_s503 (sdlahw_t* hw)
1574{
1575 unsigned port = hw->port;
1576 int tmp, i;
1577
1578 if (!detect_s503(port))
1579 return -ENODEV;
1580
1581 /* Verify configuration options */
1582 i = get_option_index(s508_dpmbase_options, virt_to_phys(hw->dpmbase));
1583 if (i == 0)
1584 return -EINVAL;
1585
1586 tmp = s502e_hmcr[i - 1];
1587 switch (hw->dpmsize) {
1588 case 0x2000:
1589 tmp |= 0x01;
1590 break;
1591
1592 case 0x10000L:
1593 break;
1594
1595 default:
1596 return -EINVAL;
1597 }
1598
1599 /* Setup dual-port memory window */
1600 _OUTB(port + 1, tmp);
1601 hw->regs[1] = tmp;
1602
1603 /* Enable memory access */
1604 _OUTB(port, 0x02);
1605 hw->regs[0] = 0x02; /* update mirror */
1606 return 0;
1607}
1608
1609/*============================================================================
1610 * Initialize S507 adapter.
1611 */
1612static int init_s507 (sdlahw_t* hw)
1613{
1614 unsigned port = hw->port;
1615 int tmp, i;
1616
1617 if (!detect_s507(port))
1618 return -ENODEV;
1619
1620 /* Verify configuration options */
1621 i = get_option_index(s507_dpmbase_options, virt_to_phys(hw->dpmbase));
1622 if (i == 0)
1623 return -EINVAL;
1624
1625 tmp = s507_hmcr[i - 1];
1626 switch (hw->dpmsize) {
1627 case 0x2000:
1628 tmp |= 0x01;
1629 break;
1630
1631 case 0x10000L:
1632 break;
1633
1634 default:
1635 return -EINVAL;
1636 }
1637
1638 /* Enable adapter's logic */
1639 _OUTB(port, 0x01);
1640 hw->regs[0] = 0x01;
1641 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1642 if (!(_INB(port) & 0x20))
1643 return -EIO;
1644
1645 /* Setup dual-port memory window */
1646 _OUTB(port + 1, tmp);
1647 hw->regs[1] = tmp;
1648
1649 /* Enable memory access */
1650 tmp = hw->regs[0] | 0x04;
1651 if (hw->irq) {
1652 i = get_option_index(s508_irq_options, hw->irq);
1653 if (i) tmp |= s507_irqmask[i - 1];
1654 }
1655 _OUTB(port, tmp);
1656 hw->regs[0] = tmp; /* update mirror */
1657 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1658 return (_INB(port) & 0x08) ? 0 : -EIO;
1659}
1660
1661/*============================================================================
1662 * Initialize S508 adapter.
1663 */
1664static int init_s508 (sdlahw_t* hw)
1665{
1666 unsigned port = hw->port;
1667 int tmp, i;
1668
1669 if (!detect_s508(port))
1670 return -ENODEV;
1671
1672 /* Verify configuration options */
1673 i = get_option_index(s508_dpmbase_options, virt_to_phys(hw->dpmbase));
1674 if (i == 0)
1675 return -EINVAL;
1676
1677 /* Setup memory configuration */
1678 tmp = s508_hmcr[i - 1];
1679 _OUTB(port + 1, tmp);
1680 hw->regs[1] = tmp;
1681
1682 /* Enable memory access */
1683 _OUTB(port, 0x04);
1684 hw->regs[0] = 0x04; /* update mirror */
1685 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1686 return (_INB(port + 1) & 0x04) ? 0 : -EIO;
1687}
1688
1689/*============================================================================
1690 * Detect S502A adapter.
1691 * Following tests are used to detect S502A adapter:
1692 * 1. All registers other than status (BASE) should read 0xFF
1693 * 2. After writing 00001000b to control register, status register should
1694 * read 01000000b.
1695 * 3. After writing 0 to control register, status register should still
1696 * read 01000000b.
1697 * 4. After writing 00000100b to control register, status register should
1698 * read 01000100b.
1699 * Return 1 if detected o.k. or 0 if failed.
1700 * Note: This test is destructive! Adapter will be left in shutdown
1701 * state after the test.
1702 */
1703static int detect_s502a (int port)
1704{
1705 int i, j;
1706
1707 if (!get_option_index(s502_port_options, port))
1708 return 0;
1709
1710 for (j = 1; j < SDLA_MAXIORANGE; ++j) {
1711 if (_INB(port + j) != 0xFF)
1712 return 0;
1713 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1714 }
1715
1716 _OUTB(port, 0x08); /* halt CPU */
1717 _OUTB(port, 0x08);
1718 _OUTB(port, 0x08);
1719 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1720 if (_INB(port) != 0x40)
1721 return 0;
1722 _OUTB(port, 0x00);
1723 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1724 if (_INB(port) != 0x40)
1725 return 0;
1726 _OUTB(port, 0x04);
1727 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1728 if (_INB(port) != 0x44)
1729 return 0;
1730
1731 /* Reset adapter */
1732 _OUTB(port, 0x08);
1733 _OUTB(port, 0x08);
1734 _OUTB(port, 0x08);
1735 _OUTB(port + 1, 0xFF);
1736 return 1;
1737}
1738
1739/*============================================================================
1740 * Detect S502E adapter.
1741 * Following tests are used to verify adapter presence:
1742 * 1. All registers other than status (BASE) should read 0xFF.
1743 * 2. After writing 0 to CPU control register (BASE+3), status register
1744 * (BASE) should read 11111000b.
1745 * 3. After writing 00000100b to port BASE (set bit 2), status register
1746 * (BASE) should read 11111100b.
1747 * Return 1 if detected o.k. or 0 if failed.
1748 * Note: This test is destructive! Adapter will be left in shutdown
1749 * state after the test.
1750 */
1751static int detect_s502e (int port)
1752{
1753 int i, j;
1754
1755 if (!get_option_index(s502_port_options, port))
1756 return 0;
1757 for (j = 1; j < SDLA_MAXIORANGE; ++j) {
1758 if (_INB(port + j) != 0xFF)
1759 return 0;
1760 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1761 }
1762
1763 _OUTB(port + 3, 0); /* CPU control reg. */
1764 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1765 if (_INB(port) != 0xF8) /* read status */
1766 return 0;
1767 _OUTB(port, 0x04); /* set bit 2 */
1768 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1769 if (_INB(port) != 0xFC) /* verify */
1770 return 0;
1771
1772 /* Reset adapter */
1773 _OUTB(port, 0);
1774 return 1;
1775}
1776
1777/*============================================================================
1778 * Detect s503 adapter.
1779 * Following tests are used to verify adapter presence:
1780 * 1. All registers other than status (BASE) should read 0xFF.
1781 * 2. After writing 0 to control register (BASE), status register (BASE)
1782 * should read 11110000b.
1783 * 3. After writing 00000100b (set bit 2) to control register (BASE),
1784 * status register should read 11110010b.
1785 * Return 1 if detected o.k. or 0 if failed.
1786 * Note: This test is destructive! Adapter will be left in shutdown
1787 * state after the test.
1788 */
1789static int detect_s503 (int port)
1790{
1791 int i, j;
1792
1793 if (!get_option_index(s503_port_options, port))
1794 return 0;
1795 for (j = 1; j < SDLA_MAXIORANGE; ++j) {
1796 if (_INB(port + j) != 0xFF)
1797 return 0;
1798 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1799 }
1800
1801 _OUTB(port, 0); /* reset control reg.*/
1802 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1803 if (_INB(port) != 0xF0) /* read status */
1804 return 0;
1805 _OUTB(port, 0x04); /* set bit 2 */
1806 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1807 if (_INB(port) != 0xF2) /* verify */
1808 return 0;
1809
1810 /* Reset adapter */
1811 _OUTB(port, 0);
1812 return 1;
1813}
1814
1815/*============================================================================
1816 * Detect s507 adapter.
1817 * Following tests are used to detect s507 adapter:
1818 * 1. All ports should read the same value.
1819 * 2. After writing 0x00 to control register, status register should read
1820 * ?011000?b.
1821 * 3. After writing 0x01 to control register, status register should read
1822 * ?011001?b.
1823 * Return 1 if detected o.k. or 0 if failed.
1824 * Note: This test is destructive! Adapter will be left in shutdown
1825 * state after the test.
1826 */
1827static int detect_s507 (int port)
1828{
1829 int tmp, i, j;
1830
1831 if (!get_option_index(s508_port_options, port))
1832 return 0;
1833 tmp = _INB(port);
1834 for (j = 1; j < S507_IORANGE; ++j) {
1835 if (_INB(port + j) != tmp)
1836 return 0;
1837 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1838 }
1839
1840 _OUTB(port, 0x00);
1841 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1842 if ((_INB(port) & 0x7E) != 0x30)
1843 return 0;
1844 _OUTB(port, 0x01);
1845 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1846 if ((_INB(port) & 0x7E) != 0x32)
1847 return 0;
1848
1849 /* Reset adapter */
1850 _OUTB(port, 0x00);
1851 return 1;
1852}
1853
1854/*============================================================================
1855 * Detect s508 adapter.
1856 * Following tests are used to detect s508 adapter:
1857 * 1. After writing 0x00 to control register, status register should read
1858 * ??000000b.
1859 * 2. After writing 0x10 to control register, status register should read
1860 * ??010000b
1861 * Return 1 if detected o.k. or 0 if failed.
1862 * Note: This test is destructive! Adapter will be left in shutdown
1863 * state after the test.
1864 */
1865static int detect_s508 (int port)
1866{
1867 int i;
1868
1869 if (!get_option_index(s508_port_options, port))
1870 return 0;
1871 _OUTB(port, 0x00);
1872 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1873 if ((_INB(port + 1) & 0x3F) != 0x00)
1874 return 0;
1875 _OUTB(port, 0x10);
1876 for (i = 0; i < SDLA_IODELAY; ++i); /* delay */
1877 if ((_INB(port + 1) & 0x3F) != 0x10)
1878 return 0;
1879
1880 /* Reset adapter */
1881 _OUTB(port, 0x00);
1882 return 1;
1883}
1884
1885/*============================================================================
1886 * Detect s514 PCI adapter.
1887 * Return 1 if detected o.k. or 0 if failed.
1888 * Note: This test is destructive! Adapter will be left in shutdown
1889 * state after the test.
1890 */
1891static int detect_s514 (sdlahw_t* hw)
1892{
1893 unsigned char CPU_no, slot_no, auto_slot_cfg;
1894 int number_S514_cards = 0;
1895 u32 S514_mem_base_addr = 0;
1896 u32 ut_u32;
1897 struct pci_dev *pci_dev;
1898
1899
1900#ifndef CONFIG_PCI
1901 printk(KERN_INFO "%s: Linux not compiled for PCI usage!\n", modname);
1902 return 0;
1903#endif
1904
1905 /*
1906 The 'setup()' procedure in 'sdlamain.c' passes the CPU number and the
1907 slot number defined in 'router.conf' via the 'port' definition.
1908 */
1909 CPU_no = hw->S514_cpu_no[0];
1910 slot_no = hw->S514_slot_no;
1911 auto_slot_cfg = hw->auto_pci_cfg;
1912
1913 if (auto_slot_cfg){
1914 printk(KERN_INFO "%s: srch... S514 card, CPU %c, Slot=Auto\n",
1915 modname, CPU_no);
1916
1917 }else{
1918 printk(KERN_INFO "%s: srch... S514 card, CPU %c, Slot #%d\n",
1919 modname, CPU_no, slot_no);
1920 }
1921
1922 /* check to see that CPU A or B has been selected in 'router.conf' */
1923 switch(CPU_no) {
1924 case S514_CPU_A:
1925 case S514_CPU_B:
1926 break;
1927
1928 default:
1929 printk(KERN_INFO "%s: S514 CPU definition invalid.\n",
1930 modname);
1931 printk(KERN_INFO "Must be 'A' or 'B'\n");
1932 return 0;
1933 }
1934
1935 number_S514_cards = find_s514_adapter(hw, 0);
1936 if(!number_S514_cards)
1937 return 0;
1938
1939 /* we are using a single S514 adapter with a slot of 0 so re-read the */
1940 /* location of this adapter */
1941 if((number_S514_cards == 1) && auto_slot_cfg) {
1942 number_S514_cards = find_s514_adapter(hw, 1);
1943 if(!number_S514_cards) {
1944 printk(KERN_INFO "%s: Error finding PCI card\n",
1945 modname);
1946 return 0;
1947 }
1948 }
1949
1950 pci_dev = hw->pci_dev;
1951 /* read the physical memory base address */
1952 S514_mem_base_addr = (CPU_no == S514_CPU_A) ?
1953 (pci_dev->resource[1].start) :
1954 (pci_dev->resource[2].start);
1955
1956 printk(KERN_INFO "%s: S514 PCI memory at 0x%X\n",
1957 modname, S514_mem_base_addr);
1958 if(!S514_mem_base_addr) {
1959 if(CPU_no == S514_CPU_B)
1960 printk(KERN_INFO "%s: CPU #B not present on the card\n", modname);
1961 else
1962 printk(KERN_INFO "%s: No PCI memory allocated to card\n", modname);
1963 return 0;
1964 }
1965
1966 /* enable the PCI memory */
1967 pci_read_config_dword(pci_dev,
1968 (CPU_no == S514_CPU_A) ? PCI_MAP0_DWORD : PCI_MAP1_DWORD,
1969 &ut_u32);
1970 pci_write_config_dword(pci_dev,
1971 (CPU_no == S514_CPU_A) ? PCI_MAP0_DWORD : PCI_MAP1_DWORD,
1972 (ut_u32 | PCI_MEMORY_ENABLE));
1973
1974 /* check the IRQ allocated and enable IRQ usage */
1975 if(!(hw->irq = pci_dev->irq)) {
1976 printk(KERN_INFO "%s: IRQ not allocated to S514 adapter\n",
1977 modname);
1978 return 0;
1979 }
1980
1981 /* BUG FIX : Mar 6 2000
1982 * On an initial loading of the card, we must check
1983 * and clear PCI interrupt bits, due to a reset
1984 * problem on some other boards. i.e. An interrupt
1985 * might be pending, even after system bootup,
1986 * in which case, when starting wanrouter the machine
1987 * would crash.
1988 */
1989 if (init_pci_slot(hw))
1990 return 0;
1991
1992 pci_read_config_dword(pci_dev, PCI_INT_CONFIG, &ut_u32);
1993 ut_u32 |= (CPU_no == S514_CPU_A) ?
1994 PCI_ENABLE_IRQ_CPU_A : PCI_ENABLE_IRQ_CPU_B;
1995 pci_write_config_dword(pci_dev, PCI_INT_CONFIG, ut_u32);
1996
1997 printk(KERN_INFO "%s: IRQ %d allocated to the S514 card\n",
1998 modname, hw->irq);
1999
2000 /* map the physical PCI memory to virtual memory */
2001 hw->dpmbase = ioremap((unsigned long)S514_mem_base_addr,
2002 (unsigned long)MAX_SIZEOF_S514_MEMORY);
2003 /* map the physical control register memory to virtual memory */
2004 hw->vector = (unsigned long)ioremap(
2005 (unsigned long)(S514_mem_base_addr + S514_CTRL_REG_BYTE),
2006 (unsigned long)16);
2007
2008 if(!hw->dpmbase || !hw->vector) {
2009 printk(KERN_INFO "%s: PCI virtual memory allocation failed\n",
2010 modname);
2011 return 0;
2012 }
2013
2014 /* halt the adapter */
2015 writeb (S514_CPU_HALT, hw->vector);
2016
2017 return 1;
2018}
2019
2020/*============================================================================
2021 * Find the S514 PCI adapter in the PCI bus.
2022 * Return the number of S514 adapters found (0 if no adapter found).
2023 */
2024static int find_s514_adapter(sdlahw_t* hw, char find_first_S514_card)
2025{
2026 unsigned char slot_no;
2027 int number_S514_cards = 0;
2028 char S514_found_in_slot = 0;
2029 u16 PCI_subsys_vendor;
2030
2031 struct pci_dev *pci_dev = NULL;
2032
2033 slot_no = hw->S514_slot_no;
2034
2035 while ((pci_dev = pci_find_device(V3_VENDOR_ID, V3_DEVICE_ID, pci_dev))
2036 != NULL) {
2037
2038 pci_read_config_word(pci_dev, PCI_SUBSYS_VENDOR_WORD,
2039 &PCI_subsys_vendor);
2040
2041 if(PCI_subsys_vendor != SANGOMA_SUBSYS_VENDOR)
2042 continue;
2043
2044 hw->pci_dev = pci_dev;
2045
2046 if(find_first_S514_card)
2047 return(1);
2048
2049 number_S514_cards ++;
2050
2051 printk(KERN_INFO
2052 "%s: S514 card found, slot #%d (devfn 0x%X)\n",
2053 modname, ((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK),
2054 pci_dev->devfn);
2055
2056 if (hw->auto_pci_cfg){
2057 hw->S514_slot_no = ((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK);
2058 slot_no = hw->S514_slot_no;
2059
2060 }else if (((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK) == slot_no){
2061 S514_found_in_slot = 1;
2062 break;
2063 }
2064 }
2065
2066 /* if no S514 adapter has been found, then exit */
2067 if (!number_S514_cards) {
2068 printk(KERN_INFO "%s: Error, no S514 adapters found\n", modname);
2069 return 0;
2070 }
2071 /* if more than one S514 card has been found, then the user must have defined a slot number so that the correct adapter is used */
2072 else if ((number_S514_cards > 1) && hw->auto_pci_cfg) {
2073 printk(KERN_INFO "%s: Error, PCI Slot autodetect Failed! \n"
2074 "%s: More than one S514 adapter found.\n"
2075 "%s: Disable the Autodetect feature and supply\n"
2076 "%s: the PCISLOT numbers for each card.\n",
2077 modname,modname,modname,modname);
2078 return 0;
2079 }
2080 /* if the user has specified a slot number and the S514 adapter has */
2081 /* not been found in that slot, then exit */
2082 else if (!hw->auto_pci_cfg && !S514_found_in_slot) {
2083 printk(KERN_INFO
2084 "%s: Error, S514 card not found in specified slot #%d\n",
2085 modname, slot_no);
2086 return 0;
2087 }
2088
2089 return (number_S514_cards);
2090}
2091
2092
2093
2094/******* Miscellaneous ******************************************************/
2095
2096/*============================================================================
2097 * Calibrate SDLA memory access delay.
2098 * Count number of idle loops made within 1 second and then calculate the
2099 * number of loops that should be made to achieve the desired delay.
2100 */
2101static int calibrate_delay (int mks)
2102{
2103 unsigned int delay;
2104 unsigned long stop;
2105
2106 for (delay = 0, stop = SYSTEM_TICK + HZ; SYSTEM_TICK < stop; ++delay);
2107 return (delay/(1000000L/mks) + 1);
2108}
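/* Worked example: if the busy loop spins 50,000,000 times in one second,
 * calibrate_delay(1) returns 50000000 / (1000000 / 1) + 1 == 51, i.e.
 * roughly 51 idle loops per microsecond of requested delay.
 */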
2109
2110/*============================================================================
2111 * Get option's index into the options list.
2112 * Return option's index (1 .. N) or zero if option is invalid.
2113 */
2114static int get_option_index (unsigned* optlist, unsigned optval)
2115{
2116 int i;
2117
2118 for (i = 1; i <= optlist[0]; ++i)
2119 if ( optlist[i] == optval)
2120 return i;
2121 return 0;
2122}
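/* Example with a hypothetical option table (element [0] holds the count):
 *
 *	static unsigned demo_port_options[] = { 4, 0x250, 0x300, 0x350, 0x360 };
 *
 *	get_option_index(demo_port_options, 0x350)	returns 3
 *	get_option_index(demo_port_options, 0x240)	returns 0 (not listed)
 */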
2123
2124/*============================================================================
2125 * Check memory region to see if it's available.
2126 * Return: 0 ok.
2127 */
2128static unsigned check_memregion (void* ptr, unsigned len)
2129{
2130 volatile unsigned char* p = ptr;
2131
2132 for (; len && (readb (p) == 0xFF); --len, ++p) {
2133 writeb (0, p); /* attempt to write 0 */
2134 if (readb(p) != 0xFF) { /* still has to read 0xFF */
2135 writeb (0xFF, p);/* restore original value */
2136 break; /* not good */
2137 }
2138 }
2139
2140 return len;
2141}
2142
2143/*============================================================================
2144 * Test memory region.
2145 * Return: size of the region that passed the test.
2146 * Note: Region size must be multiple of 2 !
2147 */
2148static unsigned test_memregion (void* ptr, unsigned len)
2149{
2150 volatile unsigned short* w_ptr;
2151 unsigned len_w = len >> 1; /* region len in words */
2152 unsigned i;
2153
2154 for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
2155 writew (0xAA55, w_ptr);
2156
2157 for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
2158 if (readw (w_ptr) != 0xAA55) {
2159 len_w = i;
2160 break;
2161 }
2162
2163 for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
2164 writew (0x55AA, w_ptr);
2165
2166 for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
2167 if (readw(w_ptr) != 0x55AA) {
2168 len_w = i;
2169 break;
2170 }
2171
2172 for (i = 0, w_ptr = ptr; i < len_w; ++i, ++w_ptr)
2173 writew (0, w_ptr);
2174
2175 return len_w << 1;
2176}
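/* The test writes the word pattern 0xAA55 across the region, verifies it,
 * repeats with the complementary pattern 0x55AA, then zeroes the region;
 * the returned byte count is twice the number of words that held both
 * patterns.
 */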
2177
2178/*============================================================================
2179 * Calculate 16-bit CRC using CCITT polynomial.
2180 */
2181static unsigned short checksum (unsigned char* buf, unsigned len)
2182{
2183 unsigned short crc = 0;
2184 unsigned mask, flag;
2185
2186 for (; len; --len, ++buf) {
2187 for (mask = 0x80; mask; mask >>= 1) {
2188 flag = (crc & 0x8000);
2189 crc <<= 1;
2190 crc |= ((*buf & mask) ? 1 : 0);
2191 if (flag) crc ^= 0x1021;
2192 }
2193 }
2194 return crc;
2195}
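/* This is a bit-at-a-time CRC using the CCITT polynomial 0x1021 with a zero
 * initial value.  sdla_load() above uses it to validate a firmware image:
 * the CRC computed over the information block plus code segment must match
 * the sfm->checksum field stored in the module.
 */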
2196
2197static int init_pci_slot(sdlahw_t *hw)
2198{
2199
2200 u32 int_status;
2201 int volatile found=0;
2202 int i=0;
2203
2204 /* Check if this is the very first load for a specific
2205 * PCI card. If it is, clear the interrupt bits and
2206 * set the flag indicating that this card was initialized.
2207 */
2208
2209 for (i=0; (i<MAX_S514_CARDS) && !found; i++){
2210 if (pci_slot_ar[i] == hw->S514_slot_no){
2211 found=1;
2212 break;
2213 }
2214 if (pci_slot_ar[i] == 0xFF){
2215 break;
2216 }
2217 }
2218
2219 if (!found){
2220 read_S514_int_stat(hw,&int_status);
2221 S514_intack(hw,int_status);
2222 if (i == MAX_S514_CARDS){
2223 printk(KERN_INFO "%s: Critical Error !!!\n",modname);
2224 printk(KERN_INFO
2225 "%s: Number of Sangoma PCI cards exceeded maximum limit.\n",
2226 modname);
2227 printk(KERN_INFO "Please contact Sangoma Technologies\n");
2228 return 1;
2229 }
2230 pci_slot_ar[i] = hw->S514_slot_no;
2231 }
2232 return 0;
2233}
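/* pci_slot_ar[] acts as a small "already initialized" table: the first time
 * a given S514 slot is seen, any pending PCI interrupt bits are cleared and
 * the slot number is recorded (an entry of 0xFF marks an unused table slot),
 * so the reset workaround runs only once per card.
 */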
2234
2235static int pci_probe(sdlahw_t *hw)
2236{
2237
2238 unsigned char slot_no;
2239 int number_S514_cards = 0;
2240 u16 PCI_subsys_vendor;
2241 u16 PCI_card_type;
2242
2243 struct pci_dev *pci_dev = NULL;
2244 struct pci_bus *bus = NULL;
2245
2246 slot_no = 0;
2247
2248 while ((pci_dev = pci_find_device(V3_VENDOR_ID, V3_DEVICE_ID, pci_dev))
2249 != NULL) {
2250
2251 pci_read_config_word(pci_dev, PCI_SUBSYS_VENDOR_WORD,
2252 &PCI_subsys_vendor);
2253
2254 if(PCI_subsys_vendor != SANGOMA_SUBSYS_VENDOR)
2255 continue;
2256
2257 pci_read_config_word(pci_dev, PCI_CARD_TYPE,
2258 &PCI_card_type);
2259
2260 bus = pci_dev->bus;
2261
2262 /* A dual-CPU card can support up to 4 physical connections,
2263 * whereas a single-CPU card can support up to 2 physical
2264 * connections. The FT1 card can only support a single
2265 * connection; however, we cannot distinguish between a single-
2266 * CPU card and an FT1 card. */
2267 if (PCI_card_type == S514_DUAL_CPU){
2268 number_S514_cards += 4;
2269 printk(KERN_INFO
2270 "wanpipe: S514-PCI card found, cpu(s) 2, bus #%d, slot #%d, irq #%d\n",
2271 bus->number,((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK),
2272 pci_dev->irq);
2273 }else{
2274 number_S514_cards += 2;
2275 printk(KERN_INFO
2276 "wanpipe: S514-PCI card found, cpu(s) 1, bus #%d, slot #%d, irq #%d\n",
2277 bus->number,((pci_dev->devfn >> 3) & PCI_DEV_SLOT_MASK),
2278 pci_dev->irq);
2279 }
2280 }
2281
2282 return number_S514_cards;
2283
2284}
2285
2286
2287
2288EXPORT_SYMBOL(wanpipe_hw_probe);
2289
2290unsigned wanpipe_hw_probe(void)
2291{
2292 sdlahw_t hw;
2293 unsigned* opt = s508_port_options;
2294 unsigned cardno=0;
2295 int i;
2296
2297 memset(&hw, 0, sizeof(hw));
2298
2299 for (i = 1; i <= opt[0]; i++) {
2300 if (detect_s508(opt[i])){
2301 /* S508 card can support up to two physical links */
2302 cardno+=2;
2303 printk(KERN_INFO "wanpipe: S508-ISA card found, port 0x%x\n",opt[i]);
2304 }
2305 }
2306
2307 #ifdef CONFIG_PCI
2308 hw.S514_slot_no = 0;
2309 cardno += pci_probe(&hw);
2310 #else
2311 printk(KERN_INFO "wanpipe: Warning, Kernel not compiled for PCI support!\n");
2312 printk(KERN_INFO "wanpipe: PCI Hardware Probe Failed!\n");
2313 #endif
2314
2315 return cardno;
2316}
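/* wanpipe_hw_probe() is called once from wanpipe_init() in sdlamain.c (see
 * below) to decide how many sdla_t device structures to allocate: each S508
 * found counts for 2 potential devices, and each S514 for 2 or 4 depending
 * on whether it is a single- or dual-CPU card.
 */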
2317
2318/****** End *****************************************************************/
diff --git a/drivers/net/wan/sdlamain.c b/drivers/net/wan/sdlamain.c
new file mode 100644
index 000000000000..74e151acef3e
--- /dev/null
+++ b/drivers/net/wan/sdlamain.c
@@ -0,0 +1,1341 @@
1/****************************************************************************
2* sdlamain.c WANPIPE(tm) Multiprotocol WAN Link Driver. Main module.
3*
4* Author: Nenad Corbic <ncorbic@sangoma.com>
5* Gideon Hack
6*
7* Copyright: (c) 1995-2000 Sangoma Technologies Inc.
8*
9* This program is free software; you can redistribute it and/or
10* modify it under the terms of the GNU General Public License
11* as published by the Free Software Foundation; either version
12* 2 of the License, or (at your option) any later version.
13* ============================================================================
14* Dec 22, 2000 Nenad Corbic Updated for 2.4.X kernels.
15* Removed the polling routine.
16* Nov 13, 2000 Nenad Corbic Added hw probing on module load and dynamic
17* device allocation.
18* Nov 7, 2000 Nenad Corbic Fixed the Multi-Port PPP for kernels
19* 2.2.16 and above.
20* Aug 2, 2000 Nenad Corbic Block the Multi-Port PPP from running on
21* kernels 2.2.16 or greater. The SyncPPP
22* has changed.
23* Jul 25, 2000 Nenad Corbic Updated the Piggyback support for Multi-Port PPP.
24* Jul 13, 2000 Nenad Corbic Added Multi-PPP support.
25* Feb 02, 2000 Nenad Corbic Fixed up piggyback probing and selection.
26* Sep 23, 1999 Nenad Corbic Added support for SMP
27* Sep 13, 1999 Nenad Corbic Each port is treated as a separate device.
28* Jun 02, 1999 Gideon Hack Added support for the S514 adapter.
29* Updates for Linux 2.2.X kernels.
30* Sep 17, 1998 Jaspreet Singh Updated for 2.1.121+ kernel
31* Nov 28, 1997 Jaspreet Singh Changed DRV_RELEASE to 1
32* Nov 10, 1997 Jaspreet Singh Changed sti() to restore_flags();
33* Nov 06, 1997 Jaspreet Singh Changed DRV_VERSION to 4 and DRV_RELEASE to 0
34* Oct 20, 1997 Jaspreet Singh Modified sdla_isr routine so that card->in_isr
35* assignments are taken out and placed in the
36* sdla_ppp.c, sdla_fr.c and sdla_x25.c isr
37* routines. Took out 'wandev->tx_int_enabled' and
38* replaced it with 'wandev->enable_tx_int'.
39* May 29, 1997 Jaspreet Singh Flow Control Problem
40* added "wandev->tx_int_enabled=1" line in the
41* init module. This line initializes the flag for
42* preventing Interrupt disabled with device set to
43* busy
44* Jan 15, 1997 Gene Kozin Version 3.1.0
45* o added UDP management stuff
46* Jan 02, 1997 Gene Kozin Initial version.
47*****************************************************************************/
48
49#include <linux/config.h> /* OS configuration options */
50#include <linux/stddef.h> /* offsetof(), etc. */
51#include <linux/errno.h> /* return codes */
52#include <linux/string.h> /* inline memset(), etc. */
53#include <linux/init.h>
54#include <linux/slab.h> /* kmalloc(), kfree() */
55#include <linux/kernel.h> /* printk(), and other useful stuff */
56#include <linux/module.h> /* support for loadable modules */
57#include <linux/ioport.h> /* request_region(), release_region() */
58#include <linux/wanrouter.h> /* WAN router definitions */
59#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
60
61#include <linux/in.h>
62#include <asm/io.h> /* phys_to_virt() */
63#include <linux/pci.h>
64#include <linux/sdlapci.h>
65#include <linux/if_wanpipe_common.h>
66
67#include <asm/uaccess.h> /* kernel <-> user copy */
68#include <linux/inetdevice.h>
69
70#include <linux/ip.h>
71#include <net/route.h>
72
73#define KMEM_SAFETYZONE 8
74
75
76#ifndef CONFIG_WANPIPE_FR
77 #define wpf_init(a,b) (-EPROTONOSUPPORT)
78#endif
79
80#ifndef CONFIG_WANPIPE_CHDLC
81 #define wpc_init(a,b) (-EPROTONOSUPPORT)
82#endif
83
84#ifndef CONFIG_WANPIPE_X25
85 #define wpx_init(a,b) (-EPROTONOSUPPORT)
86#endif
87
88#ifndef CONFIG_WANPIPE_PPP
89 #define wpp_init(a,b) (-EPROTONOSUPPORT)
90#endif
91
92#ifndef CONFIG_WANPIPE_MULTPPP
93 #define wsppp_init(a,b) (-EPROTONOSUPPORT)
94#endif
95
96
97/***********FOR DEBUGGING PURPOSES*********************************************
98static void * dbg_kmalloc(unsigned int size, int prio, int line) {
99 int i = 0;
100 void * v = kmalloc(size+sizeof(unsigned int)+2*KMEM_SAFETYZONE*8,prio);
101 char * c1 = v;
102 c1 += sizeof(unsigned int);
103 *((unsigned int *)v) = size;
104
105 for (i = 0; i < KMEM_SAFETYZONE; i++) {
106 c1[0] = 'D'; c1[1] = 'E'; c1[2] = 'A'; c1[3] = 'D';
107 c1[4] = 'B'; c1[5] = 'E'; c1[6] = 'E'; c1[7] = 'F';
108 c1 += 8;
109 }
110 c1 += size;
111 for (i = 0; i < KMEM_SAFETYZONE; i++) {
112 c1[0] = 'M'; c1[1] = 'U'; c1[2] = 'N'; c1[3] = 'G';
113 c1[4] = 'W'; c1[5] = 'A'; c1[6] = 'L'; c1[7] = 'L';
114 c1 += 8;
115 }
116 v = ((char *)v) + sizeof(unsigned int) + KMEM_SAFETYZONE*8;
117 printk(KERN_INFO "line %d kmalloc(%d,%d) = %p\n",line,size,prio,v);
118 return v;
119}
120static void dbg_kfree(void * v, int line) {
121 unsigned int * sp = (unsigned int *)(((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8));
122 unsigned int size = *sp;
123 char * c1 = ((char *)v) - KMEM_SAFETYZONE*8;
124 int i = 0;
125 for (i = 0; i < KMEM_SAFETYZONE; i++) {
126 if ( c1[0] != 'D' || c1[1] != 'E' || c1[2] != 'A' || c1[3] != 'D'
127 || c1[4] != 'B' || c1[5] != 'E' || c1[6] != 'E' || c1[7] != 'F') {
128 printk(KERN_INFO "kmalloced block at %p has been corrupted (underrun)!\n",v);
129 printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
130 c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
131 }
132 c1 += 8;
133 }
134 c1 += size;
135 for (i = 0; i < KMEM_SAFETYZONE; i++) {
136 if ( c1[0] != 'M' || c1[1] != 'U' || c1[2] != 'N' || c1[3] != 'G'
137 || c1[4] != 'W' || c1[5] != 'A' || c1[6] != 'L' || c1[7] != 'L'
138 ) {
139 printk(KERN_INFO "kmalloced block at %p has been corrupted (overrun):\n",v);
140 printk(KERN_INFO " %4x: %2x %2x %2x %2x %2x %2x %2x %2x\n", i*8,
141 c1[0],c1[1],c1[2],c1[3],c1[4],c1[5],c1[6],c1[7] );
142 }
143 c1 += 8;
144 }
145 printk(KERN_INFO "line %d kfree(%p)\n",line,v);
146 v = ((char *)v) - (sizeof(unsigned int) + KMEM_SAFETYZONE*8);
147 kfree(v);
148}
149
150#define kmalloc(x,y) dbg_kmalloc(x,y,__LINE__)
151#define kfree(x) dbg_kfree(x,__LINE__)
152******************************************************************************/
153
154
155
156/****** Defines & Macros ****************************************************/
157
158#ifdef _DEBUG_
159#define STATIC
160#else
161#define STATIC static
162#endif
163
164#define DRV_VERSION 5 /* version number */
165#define DRV_RELEASE 0 /* release (minor version) number */
166#define MAX_CARDS 16 /* max number of adapters */
167
168#ifndef CONFIG_WANPIPE_CARDS /* configurable option */
169#define CONFIG_WANPIPE_CARDS 1
170#endif
171
172#define CMD_OK 0 /* normal firmware return code */
173#define CMD_TIMEOUT 0xFF /* firmware command timed out */
174#define MAX_CMD_RETRY 10 /* max number of firmware retries */
175/****** Function Prototypes *************************************************/
176
177extern void disable_irq(unsigned int);
178extern void enable_irq(unsigned int);
179
180/* WAN link driver entry points */
181static int setup(struct wan_device* wandev, wandev_conf_t* conf);
182static int shutdown(struct wan_device* wandev);
183static int ioctl(struct wan_device* wandev, unsigned cmd, unsigned long arg);
184
185/* IOCTL handlers */
186static int ioctl_dump (sdla_t* card, sdla_dump_t* u_dump);
187static int ioctl_exec (sdla_t* card, sdla_exec_t* u_exec, int);
188
189/* Miscellaneous functions */
190STATIC irqreturn_t sdla_isr (int irq, void* dev_id, struct pt_regs *regs);
191static void release_hw (sdla_t *card);
192
193static int check_s508_conflicts (sdla_t* card,wandev_conf_t* conf, int*);
194static int check_s514_conflicts (sdla_t* card,wandev_conf_t* conf, int*);
195
196
197/****** Global Data **********************************************************
198 * Note: All data must be explicitly initialized!!!
199 */
200
201/* private data */
202static char drvname[] = "wanpipe";
203static char fullname[] = "WANPIPE(tm) Multiprotocol Driver";
204static char copyright[] = "(c) 1995-2000 Sangoma Technologies Inc.";
205static int ncards;
206static sdla_t* card_array; /* adapter data space */
207
208/* Wanpipe's own workqueue, used for all APIs.
209 * All protocol-specific tasks will be inserted
210 * into the "wanpipe_wq" workqueue.
211 *
212 * The kernel workqueue mechanism will execute
213 * all pending tasks in the "wanpipe_wq" workqueue.
214 */
215
216struct workqueue_struct *wanpipe_wq;
217DECLARE_WORK(wanpipe_work, NULL, NULL);
218
219static int wanpipe_bh_critical;
220
221/******* Kernel Loadable Module Entry Points ********************************/
222
223/*============================================================================
224 * Module 'insert' entry point.
225 * o print announcement
226 * o allocate adapter data space
227 * o initialize static data
228 * o register all cards with WAN router
229 * o calibrate SDLA shared memory access delay.
230 *
231 * Return: 0 Ok
232 * < 0 error.
233 * Context: process
234 */
235
236static int __init wanpipe_init(void)
237{
238 int cnt, err = 0;
239
240 printk(KERN_INFO "%s v%u.%u %s\n",
241 fullname, DRV_VERSION, DRV_RELEASE, copyright);
242
243 wanpipe_wq = create_workqueue("wanpipe_wq");
244 if (!wanpipe_wq)
245 return -ENOMEM;
246
247 /* Probe for wanpipe cards and return the number found */
248 printk(KERN_INFO "wanpipe: Probing for WANPIPE hardware.\n");
249 ncards = wanpipe_hw_probe();
250 if (ncards){
251 printk(KERN_INFO "wanpipe: Allocating maximum %i devices: wanpipe%i - wanpipe%i.\n",ncards,1,ncards);
252 }else{
253 printk(KERN_INFO "wanpipe: No S514/S508 cards found, unloading modules!\n");
254 destroy_workqueue(wanpipe_wq);
255 return -ENODEV;
256 }
257
258 /* Verify number of cards and allocate adapter data space */
259 card_array = kmalloc(sizeof(sdla_t) * ncards, GFP_KERNEL);
260 if (card_array == NULL) {
261 destroy_workqueue(wanpipe_wq);
262 return -ENOMEM;
263 }
264
265 memset(card_array, 0, sizeof(sdla_t) * ncards);
266
267 /* Register adapters with WAN router */
268 for (cnt = 0; cnt < ncards; ++ cnt) {
269 sdla_t* card = &card_array[cnt];
270 struct wan_device* wandev = &card->wandev;
271
272 card->next = NULL;
273 sprintf(card->devname, "%s%d", drvname, cnt + 1);
274 wandev->magic = ROUTER_MAGIC;
275 wandev->name = card->devname;
276 wandev->private = card;
277 wandev->enable_tx_int = 0;
278 wandev->setup = &setup;
279 wandev->shutdown = &shutdown;
280 wandev->ioctl = &ioctl;
281 err = register_wan_device(wandev);
282 if (err) {
283 printk(KERN_INFO
284 "%s: %s registration failed with error %d!\n",
285 drvname, card->devname, err);
286 break;
287 }
288 }
289 if (cnt){
290 ncards = cnt; /* adjust actual number of cards */
291 }else {
292 kfree(card_array);
293 destroy_workqueue(wanpipe_wq);
294 printk(KERN_INFO "IN Init Module: NO Cards registered\n");
295 err = -ENODEV;
296 }
297
298 return err;
299}
300
301/*============================================================================
302 * Module 'remove' entry point.
303 * o unregister all adapters from the WAN router
304 * o release all remaining system resources
305 */
306static void __exit wanpipe_cleanup(void)
307{
308 int i;
309
310 if (!ncards)
311 return;
312
313 for (i = 0; i < ncards; ++i) {
314 sdla_t* card = &card_array[i];
315 unregister_wan_device(card->devname);
316 }
317 destroy_workqueue(wanpipe_wq);
318 kfree(card_array);
319
320 printk(KERN_INFO "\nwanpipe: WANPIPE Modules Unloaded.\n");
321}
322
323module_init(wanpipe_init);
324module_exit(wanpipe_cleanup);
325
326/******* WAN Device Driver Entry Points *************************************/
327
328/*============================================================================
329 * Setup/configure WAN link driver.
330 * o check adapter state
331 * o make sure firmware is present in configuration
332 * o make sure I/O port and IRQ are specified
333 * o make sure I/O region is available
334 * o allocate interrupt vector
335 * o setup SDLA hardware
336 * o call appropriate routine to perform protocol-specific initialization
337 * o mark I/O region as used
338 * o if this is the first active card, then schedule background task
339 *
340 * This function is called when router handles ROUTER_SETUP IOCTL. The
341 * configuration structure is in kernel memory (including extended data, if
342 * any).
343 */
344
345static int setup(struct wan_device* wandev, wandev_conf_t* conf)
346{
347 sdla_t* card;
348 int err = 0;
349 int irq=0;
350
351 /* Sanity checks */
352 if ((wandev == NULL) || (wandev->private == NULL) || (conf == NULL)){
353 printk(KERN_INFO
354 "%s: Failed Sdlamain Setup wandev %u, card %u, conf %u !\n",
355 wandev->name,
356 (unsigned int)wandev,(unsigned int)wandev->private,
357 (unsigned int)conf);
358 return -EFAULT;
359 }
360
361 printk(KERN_INFO "%s: Starting WAN Setup\n", wandev->name);
362
363 card = wandev->private;
364 if (wandev->state != WAN_UNCONFIGURED){
365 printk(KERN_INFO "%s: failed sdlamain setup, busy!\n",
366 wandev->name);
367 return -EBUSY; /* already configured */
368 }
369
370 printk(KERN_INFO "\nProcessing WAN device %s...\n", wandev->name);
371
372 /* Initialize the counters for each wandev
373 * Used for counting number of times new_if and
374 * del_if get called.
375 */
376 wandev->del_if_cnt = 0;
377 wandev->new_if_cnt = 0;
378 wandev->config_id = conf->config_id;
379
380 if (!conf->data_size || (conf->data == NULL)) {
381 printk(KERN_INFO
382 "%s: firmware not found in configuration data!\n",
383 wandev->name);
384 return -EINVAL;
385 }
386
387 /* Check for resource conflicts and set up the
388 * card for piggybacking if necessary */
389 if(!conf->S514_CPU_no[0]) {
390 if ((err=check_s508_conflicts(card,conf,&irq)) != 0){
391 return err;
392 }
393 }else {
394 if ((err=check_s514_conflicts(card,conf,&irq)) != 0){
395 return err;
396 }
397 }
398
399 /* If the current card has already been configured
400 * or it's a piggyback card, do not try to allocate
401 * resources.
402 */
403 if (!card->wandev.piggyback && !card->configured){
404
405 /* Configure hardware, load firmware, etc. */
406 memset(&card->hw, 0, sizeof(sdlahw_t));
407
408 /* for an S514 adapter, pass the CPU number and the slot number read */
409 /* from 'router.conf' to the 'sdla_setup()' function via the 'port' */
410 /* parameter */
411 if (conf->S514_CPU_no[0]){
412
413 card->hw.S514_cpu_no[0] = conf->S514_CPU_no[0];
414 card->hw.S514_slot_no = conf->PCI_slot_no;
415 card->hw.auto_pci_cfg = conf->auto_pci_cfg;
416
417 if (card->hw.auto_pci_cfg == WANOPT_YES){
418 printk(KERN_INFO "%s: Setting CPU to %c and Slot to Auto\n",
419 card->devname, card->hw.S514_cpu_no[0]);
420 }else{
421 printk(KERN_INFO "%s: Setting CPU to %c and Slot to %i\n",
422 card->devname, card->hw.S514_cpu_no[0], card->hw.S514_slot_no);
423 }
424
425 }else{
426 /* 508 Card io port and irq initialization */
427 card->hw.port = conf->ioport;
428 card->hw.irq = (conf->irq == 9) ? 2 : conf->irq;
429 }
430
431
432 /* Compute the virtual address of the card in kernel space */
433 if(conf->maddr){
434 card->hw.dpmbase = phys_to_virt(conf->maddr);
435 }else{
436 card->hw.dpmbase = (void *)conf->maddr;
437 }
438
439 card->hw.dpmsize = SDLA_WINDOWSIZE;
440
441 /* set the adapter type if using an S514 adapter */
442 card->hw.type = (conf->S514_CPU_no[0]) ? SDLA_S514 : conf->hw_opt[0];
443 card->hw.pclk = conf->hw_opt[1];
444
445 err = sdla_setup(&card->hw, conf->data, conf->data_size);
446 if (err){
447 printk(KERN_INFO "%s: Hardware setup Failed %i\n",
448 card->devname,err);
449 return err;
450 }
451
452 if(card->hw.type != SDLA_S514)
453 irq = (conf->irq == 2) ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
454 else
455 irq = card->hw.irq;
456
457 /* request an interrupt vector - note that interrupts may be shared */
458 /* when using the S514 PCI adapter */
459
460 if(request_irq(irq, sdla_isr,
461 (card->hw.type == SDLA_S514) ? SA_SHIRQ : 0,
462 wandev->name, card)){
463
464 printk(KERN_INFO "%s: Can't reserve IRQ %d!\n", wandev->name, irq);
465 return -EINVAL;
466 }
467
468 }else{
469 printk(KERN_INFO "%s: Card Configured %lu or Piggybacking %i!\n",
470 wandev->name,card->configured,card->wandev.piggyback);
471 }
472
473
474 if (!card->configured){
475
476 /* Initialize the Spin lock */
477 printk(KERN_INFO "%s: Initializing for SMP\n",wandev->name);
478
479 /* Piggyback spin lock has already been initialized,
480 * in check_s514/s508_conflicts() */
481 if (!card->wandev.piggyback){
482 spin_lock_init(&card->wandev.lock);
483 }
484
485 		/* Initialize WAN device data space */
486 wandev->irq = irq;
487 wandev->dma = 0;
488 if(card->hw.type != SDLA_S514){
489 wandev->ioport = card->hw.port;
490 }else{
491 wandev->S514_cpu_no[0] = card->hw.S514_cpu_no[0];
492 wandev->S514_slot_no = card->hw.S514_slot_no;
493 }
494 wandev->maddr = (unsigned long)card->hw.dpmbase;
495 wandev->msize = card->hw.dpmsize;
496 wandev->hw_opt[0] = card->hw.type;
497 wandev->hw_opt[1] = card->hw.pclk;
498 wandev->hw_opt[2] = card->hw.memory;
499 wandev->hw_opt[3] = card->hw.fwid;
500 }
501
502 /* Protocol-specific initialization */
503 switch (card->hw.fwid) {
504
505 case SFID_X25_502:
506 case SFID_X25_508:
507 printk(KERN_INFO "%s: Starting X.25 Protocol Init.\n",
508 card->devname);
509 err = wpx_init(card, conf);
510 break;
511 case SFID_FR502:
512 case SFID_FR508:
513 printk(KERN_INFO "%s: Starting Frame Relay Protocol Init.\n",
514 card->devname);
515 err = wpf_init(card, conf);
516 break;
517 case SFID_PPP502:
518 case SFID_PPP508:
519 printk(KERN_INFO "%s: Starting PPP Protocol Init.\n",
520 card->devname);
521 err = wpp_init(card, conf);
522 break;
523
524 case SFID_CHDLC508:
525 case SFID_CHDLC514:
526 if (conf->ft1){
527 printk(KERN_INFO "%s: Starting FT1 CSU/DSU Config Driver.\n",
528 card->devname);
529 err = wpft1_init(card, conf);
530 break;
531
532 }else if (conf->config_id == WANCONFIG_MPPP){
533 printk(KERN_INFO "%s: Starting Multi-Port PPP Protocol Init.\n",
534 card->devname);
535 err = wsppp_init(card,conf);
536 break;
537
538 }else{
539 printk(KERN_INFO "%s: Starting CHDLC Protocol Init.\n",
540 card->devname);
541 err = wpc_init(card, conf);
542 break;
543 }
544 default:
545 printk(KERN_INFO "%s: Error, Firmware is not supported %X %X!\n",
546 wandev->name,card->hw.fwid,SFID_CHDLC508);
547 err = -EPROTONOSUPPORT;
548 }
549
550 if (err != 0){
551 if (err == -EPROTONOSUPPORT){
552 printk(KERN_INFO
553 "%s: Error, Protocol selected has not been compiled!\n",
554 card->devname);
555 printk(KERN_INFO
556 "%s: Re-configure the kernel and re-build the modules!\n",
557 card->devname);
558 }
559
560 release_hw(card);
561 wandev->state = WAN_UNCONFIGURED;
562 return err;
563 }
564
565
566 /* Reserve I/O region and schedule background task */
567 if(card->hw.type != SDLA_S514 && !card->wandev.piggyback)
568 if (!request_region(card->hw.port, card->hw.io_range,
569 wandev->name)) {
570 printk(KERN_WARNING "port 0x%04x busy\n", card->hw.port);
571 release_hw(card);
572 wandev->state = WAN_UNCONFIGURED;
573 return -EBUSY;
574 }
575
576 /* Only use the polling routine for the X25 protocol */
577
578 card->wandev.critical=0;
579 return 0;
580}
581
582/*==================================================================
583 * check_s508_conflicts
584 *
585 * For an S508 adapter, check for a possible configuration error where we
586 * are loading an adapter on the same I/O port as a previously loaded S508
587 * card.
588 */
589
590static int check_s508_conflicts (sdla_t* card,wandev_conf_t* conf, int *irq)
591{
592 unsigned long smp_flags;
593 int i;
594
595 if (conf->ioport <= 0) {
596 printk(KERN_INFO
597 "%s: can't configure without I/O port address!\n",
598 card->wandev.name);
599 return -EINVAL;
600 }
601
602 if (conf->irq <= 0) {
603 printk(KERN_INFO "%s: can't configure without IRQ!\n",
604 card->wandev.name);
605 return -EINVAL;
606 }
607
608 if (test_bit(0,&card->configured))
609 return 0;
610
611
612 	/* Check for an already loaded card with the same I/O port and IRQ.
613 * If found, copy its hardware configuration and use its
614 * resources (i.e. piggybacking)
615 */
616
617 for (i = 0; i < ncards; i++) {
618 sdla_t *nxt_card = &card_array[i];
619
620 /* Skip the current card ptr */
621 if (nxt_card == card)
622 continue;
623
624
625 /* Find a card that is already configured with the
626 * same IO Port */
627 if ((nxt_card->hw.type == SDLA_S508) &&
628 (nxt_card->hw.port == conf->ioport) &&
629 (nxt_card->next == NULL)){
630
631 			/* We found a card that has the same configuration
632 			 * as ours. This means we must set this card up in
633 			 * piggybacking mode. However, only the CHDLC and MPPP
634 			 * protocols support this setup */
635
636 if ((conf->config_id == WANCONFIG_CHDLC ||
637 conf->config_id == WANCONFIG_MPPP) &&
638 (nxt_card->wandev.config_id == WANCONFIG_CHDLC ||
639 nxt_card->wandev.config_id == WANCONFIG_MPPP)){
640
641 *irq = nxt_card->hw.irq;
642 memcpy(&card->hw, &nxt_card->hw, sizeof(sdlahw_t));
643
644 				/* The master could already be running, so we must
645 				 * treat this as a critical section */
646 lock_adapter_irq(&nxt_card->wandev.lock, &smp_flags);
647
648 nxt_card->next = card;
649 card->next = nxt_card;
650
651 card->wandev.piggyback = WANOPT_YES;
652
653 				/* We must initialise the piggyback spin lock here
654 				 * since the ISR will try to lock card->next if it
655 * exists */
656 spin_lock_init(&card->wandev.lock);
657
658 unlock_adapter_irq(&nxt_card->wandev.lock, &smp_flags);
659 break;
660 }else{
661 				/* Trying to run piggybacking with a wrong protocol */
662 printk(KERN_INFO "%s: ERROR: Resource busy, ioport: 0x%x\n"
663 "%s: This protocol doesn't support\n"
664 "%s: multi-port operation!\n",
665 card->devname,nxt_card->hw.port,
666 card->devname,card->devname);
667 return -EEXIST;
668 }
669 }
670 }
671
672
673 /* Make sure I/O port region is available only if we are the
674 * master device. If we are running in piggybacking mode,
675 * we will use the resources of the master card. */
676 if (!card->wandev.piggyback) {
677 struct resource *rr =
678 request_region(conf->ioport, SDLA_MAXIORANGE, "sdlamain");
679 release_region(conf->ioport, SDLA_MAXIORANGE);
680
681 if (!rr) {
682 printk(KERN_INFO
683 "%s: I/O region 0x%X - 0x%X is in use!\n",
684 card->wandev.name, conf->ioport,
685 conf->ioport + SDLA_MAXIORANGE - 1);
686 return -EINVAL;
687 }
688 }
689
690 return 0;
691}
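
/*
 * Resulting layout when piggybacking is selected above (descriptive
 * sketch): the slave copies the master's sdlahw_t, the two cards point
 * at each other through card->next, and the slave gets
 * card->wandev.piggyback = WANOPT_YES, so setup() will not request the
 * shared IRQ and I/O region a second time and release_hw() can tear
 * the pair down in the correct order.
 */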
692
693/*==================================================================
694 * check_s514_conflicts
695 *
696 * For an S514 adapter, check for a possible configuration error where we
697 * are loading an adapter in the same PCI slot as a previously loaded S514
698 * card.
699 */
700
701
702static int check_s514_conflicts(sdla_t* card,wandev_conf_t* conf, int *irq)
703{
704 unsigned long smp_flags;
705 int i;
706
707 if (test_bit(0,&card->configured))
708 return 0;
709
710
711 	/* Check for an already loaded card in the same PCI slot and on the same CPU.
712 * If found, copy its hardware configuration and use its
713 * resources (i.e. piggybacking)
714 */
715
716 for (i = 0; i < ncards; i ++) {
717
718 sdla_t* nxt_card = &card_array[i];
719 if(nxt_card == card)
720 continue;
721
722 if((nxt_card->hw.type == SDLA_S514) &&
723 (nxt_card->hw.S514_slot_no == conf->PCI_slot_no) &&
724 (nxt_card->hw.S514_cpu_no[0] == conf->S514_CPU_no[0])&&
725 (nxt_card->next == NULL)){
726
727
728 if ((conf->config_id == WANCONFIG_CHDLC ||
729 conf->config_id == WANCONFIG_MPPP) &&
730 (nxt_card->wandev.config_id == WANCONFIG_CHDLC ||
731 nxt_card->wandev.config_id == WANCONFIG_MPPP)){
732
733 *irq = nxt_card->hw.irq;
734 memcpy(&card->hw, &nxt_card->hw, sizeof(sdlahw_t));
735
736 				/* The master could already be running, so we must
737 				 * treat this as a critical section */
738 lock_adapter_irq(&nxt_card->wandev.lock,&smp_flags);
739 nxt_card->next = card;
740 card->next = nxt_card;
741
742 card->wandev.piggyback = WANOPT_YES;
743
744 				/* We must initialise the piggyback spin lock here
745 				 * since the ISR will try to lock card->next if it
746 * exists */
747 spin_lock_init(&card->wandev.lock);
748
749 unlock_adapter_irq(&nxt_card->wandev.lock,&smp_flags);
750
751 }else{
752 				/* Trying to run piggybacking with a wrong protocol */
753 printk(KERN_INFO "%s: ERROR: Resource busy: CPU %c PCISLOT %i\n"
754 "%s: This protocol doesn't support\n"
755 "%s: multi-port operation!\n",
756 card->devname,
757 conf->S514_CPU_no[0],conf->PCI_slot_no,
758 card->devname,card->devname);
759 return -EEXIST;
760 }
761 }
762 }
763
764 return 0;
765}
766
767
768
769/*============================================================================
770 * Shut down WAN link driver.
771 * o shut down adapter hardware
772 * o release system resources.
773 *
774 * This function is called by the router when a device is being unregistered
775 * or when it handles the ROUTER_DOWN IOCTL.
776 */
777static int shutdown(struct wan_device* wandev)
778{
779 sdla_t *card;
780 int err=0;
781
782 /* sanity checks */
783 if ((wandev == NULL) || (wandev->private == NULL)){
784 return -EFAULT;
785 }
786
787 if (wandev->state == WAN_UNCONFIGURED){
788 return 0;
789 }
790
791 card = wandev->private;
792
793 if (card->tty_opt){
794 if (card->tty_open){
795 printk(KERN_INFO
796 "%s: Shutdown Failed: TTY is still open\n",
797 card->devname);
798 return -EBUSY;
799 }
800 }
801
802 wandev->state = WAN_UNCONFIGURED;
803
804 set_bit(PERI_CRIT,(void*)&wandev->critical);
805
806 	/* In case of piggybacking, make sure that
807 	 * we never try to shut down both devices at the same
808 	 * time, because they depend on one another */
809
810 if (card->disable_comm){
811 card->disable_comm(card);
812 }
813
814 /* Release Resources */
815 release_hw(card);
816
817 /* only free the allocated I/O range if not an S514 adapter */
818 if (wandev->hw_opt[0] != SDLA_S514 && !card->configured){
819 release_region(card->hw.port, card->hw.io_range);
820 }
821
822 if (!card->configured){
823 memset(&card->hw, 0, sizeof(sdlahw_t));
824 if (card->next){
825 memset(&card->next->hw, 0, sizeof(sdlahw_t));
826 }
827 }
828
829
830 clear_bit(PERI_CRIT,(void*)&wandev->critical);
831 return err;
832}
833
834static void release_hw (sdla_t *card)
835{
836 sdla_t *nxt_card;
837
838
839 /* Check if next device exists */
840 if (card->next){
841 nxt_card = card->next;
842 /* If next device is down then release resources */
843 if (nxt_card->wandev.state == WAN_UNCONFIGURED){
844 if (card->wandev.piggyback){
845 				/* If this is a piggyback device, use the
846 				 * master device's information
847 */
848 printk(KERN_INFO "%s: Piggyback shutting down\n",card->devname);
849 sdla_down(&card->next->hw);
850 free_irq(card->wandev.irq, card->next);
851 card->configured = 0;
852 card->next->configured = 0;
853 card->wandev.piggyback = 0;
854 }else{
855 /* Master device shutting down */
856 printk(KERN_INFO "%s: Master shutting down\n",card->devname);
857 sdla_down(&card->hw);
858 free_irq(card->wandev.irq, card);
859 card->configured = 0;
860 card->next->configured = 0;
861 }
862 }else{
863 printk(KERN_INFO "%s: Device still running %i\n",
864 nxt_card->devname,nxt_card->wandev.state);
865
866 card->configured = 1;
867 }
868 }else{
869 printk(KERN_INFO "%s: Master shutting down\n",card->devname);
870 sdla_down(&card->hw);
871 free_irq(card->wandev.irq, card);
872 card->configured = 0;
873 }
874 return;
875}
876
877
878/*============================================================================
879 * Driver I/O control.
880 * o verify arguments
881 * o perform requested action
882 *
883 * This function is called when the router handles one of the reserved user
884 * IOCTLs. Note that 'arg' still points to user address space.
885 */
886static int ioctl(struct wan_device* wandev, unsigned cmd, unsigned long arg)
887{
888 sdla_t* card;
889 int err;
890
891 /* sanity checks */
892 if ((wandev == NULL) || (wandev->private == NULL))
893 return -EFAULT;
894 if (wandev->state == WAN_UNCONFIGURED)
895 return -ENODEV;
896
897 card = wandev->private;
898
899 if(card->hw.type != SDLA_S514){
900 disable_irq(card->hw.irq);
901 }
902
903 if (test_bit(SEND_CRIT, (void*)&wandev->critical)) {
904 return -EAGAIN;
905 }
906
907 switch (cmd) {
908 case WANPIPE_DUMP:
909 err = ioctl_dump(wandev->private, (void*)arg);
910 break;
911
912 case WANPIPE_EXEC:
913 err = ioctl_exec(wandev->private, (void*)arg, cmd);
914 break;
915 default:
916 err = -EINVAL;
917 }
918
919 return err;
920}
921
922/****** Driver IOCTL Handlers ***********************************************/
923
924/*============================================================================
925 * Dump adapter memory to user buffer.
926 * o verify request structure
927 * o copy request structure to kernel data space
928 * o verify length/offset
929 * o verify user buffer
930 * o copy adapter memory image to user buffer
931 *
932 * Note: when dumping memory, this routine switches the current dual-port
933 * memory vector, so care must be taken to avoid race conditions.
934 */
935static int ioctl_dump (sdla_t* card, sdla_dump_t* u_dump)
936{
937 sdla_dump_t dump;
938 unsigned winsize;
939 unsigned long oldvec; /* DPM window vector */
940 unsigned long smp_flags;
941 int err = 0;
942
943 if(copy_from_user((void*)&dump, (void*)u_dump, sizeof(sdla_dump_t)))
944 return -EFAULT;
945
946 if ((dump.magic != WANPIPE_MAGIC) ||
947 (dump.offset + dump.length > card->hw.memory))
948 return -EINVAL;
949
950 winsize = card->hw.dpmsize;
951
952 if(card->hw.type != SDLA_S514) {
953
954 lock_adapter_irq(&card->wandev.lock, &smp_flags);
955
956 oldvec = card->hw.vector;
957 while (dump.length) {
958 /* current offset */
959 unsigned pos = dump.offset % winsize;
960 /* current vector */
961 unsigned long vec = dump.offset - pos;
962 unsigned len = (dump.length > (winsize - pos)) ?
963 (winsize - pos) : dump.length;
964 /* relocate window */
965 if (sdla_mapmem(&card->hw, vec) != 0) {
966 err = -EIO;
967 break;
968 }
969
970 if(copy_to_user((void *)dump.ptr,
971 (u8 *)card->hw.dpmbase + pos, len)){
972
973 unlock_adapter_irq(&card->wandev.lock, &smp_flags);
974 return -EFAULT;
975 }
976
977 dump.length -= len;
978 dump.offset += len;
979 dump.ptr = (char*)dump.ptr + len;
980 }
981
982 sdla_mapmem(&card->hw, oldvec);/* restore DPM window position */
983 unlock_adapter_irq(&card->wandev.lock, &smp_flags);
984
985 }else {
986
987 if(copy_to_user((void *)dump.ptr,
988 (u8 *)card->hw.dpmbase + dump.offset, dump.length)){
989 return -EFAULT;
990 }
991 }
992
993 return err;
994}
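
/*
 * A worked example of the windowing arithmetic above (illustration only;
 * the numbers are hypothetical): with an 8 KB window (winsize == 0x2000)
 * and a requested offset of 0x2100,
 *
 *	pos = 0x2100 % 0x2000 = 0x100
 *	vec = 0x2100 - 0x100  = 0x2000
 *	len = min(dump.length, 0x2000 - 0x100)
 *
 * so the DPM window is remapped to vector 0x2000 and the copy starts
 * 0x100 bytes into the window; the loop then advances offset and ptr
 * and repeats until the whole request has been copied out.
 */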
995
996/*============================================================================
997 * Execute adapter firmware command.
998 * o verify request structure
999 * o copy request structure to kernel data space
1000 * o call protocol-specific 'exec' function
1001 */
1002static int ioctl_exec (sdla_t* card, sdla_exec_t* u_exec, int cmd)
1003{
1004 sdla_exec_t exec;
1005 int err=0;
1006
1007 if (card->exec == NULL && cmd == WANPIPE_EXEC){
1008 return -ENODEV;
1009 }
1010
1011 if(copy_from_user((void*)&exec, (void*)u_exec, sizeof(sdla_exec_t)))
1012 return -EFAULT;
1013
1014 if ((exec.magic != WANPIPE_MAGIC) || (exec.cmd == NULL))
1015 return -EINVAL;
1016
1017 switch (cmd) {
1018 case WANPIPE_EXEC:
1019 err = card->exec(card, exec.cmd, exec.data);
1020 break;
1021 }
1022 return err;
1023}
1024
1025/******* Miscellaneous ******************************************************/
1026
1027/*============================================================================
1028 * SDLA Interrupt Service Routine.
1029 * o acknowledge SDLA hardware interrupt.
1030 * o call protocol-specific interrupt service routine, if any.
1031 */
1032STATIC irqreturn_t sdla_isr (int irq, void* dev_id, struct pt_regs *regs)
1033{
1034#define card ((sdla_t*)dev_id)
1035
1036	if(card->hw.type == SDLA_S514) {	/* handle interrupt on S514 */
1037 u32 int_status;
1038 unsigned char CPU_no = card->hw.S514_cpu_no[0];
1039 unsigned char card_found_for_IRQ;
1040 u8 IRQ_count = 0;
1041
1042 for(;;) {
1043
1044 read_S514_int_stat(&card->hw, &int_status);
1045
1046 /* check if the interrupt is for this device */
1047 if(!((unsigned char)int_status &
1048 (IRQ_CPU_A | IRQ_CPU_B)))
1049 return IRQ_HANDLED;
1050
1051 /* if the IRQ is for both CPUs on the same adapter, */
1052 /* then alter the interrupt status so as to handle */
1053 /* one CPU at a time */
1054 if(((unsigned char)int_status & (IRQ_CPU_A | IRQ_CPU_B))
1055 == (IRQ_CPU_A | IRQ_CPU_B)) {
1056 int_status &= (CPU_no == S514_CPU_A) ?
1057 ~IRQ_CPU_B : ~IRQ_CPU_A;
1058 }
1059
1060 card_found_for_IRQ = 0;
1061
1062 /* check to see that the CPU number for this device */
1063 /* corresponds to the interrupt status read */
1064 switch (CPU_no) {
1065 case S514_CPU_A:
1066 if((unsigned char)int_status &
1067 IRQ_CPU_A)
1068 card_found_for_IRQ = 1;
1069 break;
1070
1071 case S514_CPU_B:
1072 if((unsigned char)int_status &
1073 IRQ_CPU_B)
1074 card_found_for_IRQ = 1;
1075 break;
1076 }
1077
1078 /* exit if the interrupt is for another CPU on the */
1079 /* same IRQ */
1080 if(!card_found_for_IRQ)
1081 return IRQ_HANDLED;
1082
1083 if (!card ||
1084 (card->wandev.state == WAN_UNCONFIGURED && !card->configured)){
1085 printk(KERN_INFO
1086 "Received IRQ %d for CPU #%c\n",
1087 irq, CPU_no);
1088 printk(KERN_INFO
1089 "IRQ for unconfigured adapter\n");
1090 S514_intack(&card->hw, int_status);
1091 return IRQ_HANDLED;
1092 }
1093
1094 if (card->in_isr) {
1095 printk(KERN_INFO
1096 "%s: interrupt re-entrancy on IRQ %d\n",
1097 card->devname, card->wandev.irq);
1098 S514_intack(&card->hw, int_status);
1099 return IRQ_HANDLED;
1100 }
1101
1102 spin_lock(&card->wandev.lock);
1103 if (card->next){
1104 spin_lock(&card->next->wandev.lock);
1105 }
1106
1107 S514_intack(&card->hw, int_status);
1108 if (card->isr)
1109 card->isr(card);
1110
1111 if (card->next){
1112 spin_unlock(&card->next->wandev.lock);
1113 }
1114 spin_unlock(&card->wandev.lock);
1115
1116 /* handle a maximum of two interrupts (one for each */
1117 /* CPU on the adapter) before returning */
1118 if((++ IRQ_count) == 2)
1119 return IRQ_HANDLED;
1120 }
1121 }
1122
1123 else { /* handle interrupt on S508 adapter */
1124
1125 if (!card || ((card->wandev.state == WAN_UNCONFIGURED) && !card->configured))
1126 return IRQ_HANDLED;
1127
1128 if (card->in_isr) {
1129 printk(KERN_INFO
1130 "%s: interrupt re-entrancy on IRQ %d!\n",
1131 card->devname, card->wandev.irq);
1132 return IRQ_HANDLED;
1133 }
1134
1135 spin_lock(&card->wandev.lock);
1136 if (card->next){
1137 spin_lock(&card->next->wandev.lock);
1138 }
1139
1140 sdla_intack(&card->hw);
1141 if (card->isr)
1142 card->isr(card);
1143
1144 if (card->next){
1145 spin_unlock(&card->next->wandev.lock);
1146 }
1147 spin_unlock(&card->wandev.lock);
1148
1149 }
1150 return IRQ_HANDLED;
1151#undef card
1152}
1153
1154/*============================================================================
1155 * This routine is called by the protocol-specific modules when a network
1156 * interface is being opened. The only reason we need it is that we have to
1157 * call MOD_INC_USE_COUNT but cannot include 'module.h', where it is
1158 * defined, more than once in the same kernel module.
1159 */
1160void wanpipe_open (sdla_t* card)
1161{
1162 ++card->open_cnt;
1163}
1164
1165/*============================================================================
1166 * This routine is called by the protocol-specific modules when a network
1167 * interface is being closed. The only reason we need it is that we have to
1168 * call MOD_DEC_USE_COUNT but cannot include 'module.h', where it is
1169 * defined, more than once in the same kernel module.
1170 */
1171void wanpipe_close (sdla_t* card)
1172{
1173 --card->open_cnt;
1174}
1175
1176/*============================================================================
1177 * Set WAN device state.
1178 */
1179void wanpipe_set_state (sdla_t* card, int state)
1180{
1181 if (card->wandev.state != state) {
1182 switch (state) {
1183 case WAN_CONNECTED:
1184 printk (KERN_INFO "%s: link connected!\n",
1185 card->devname);
1186 break;
1187
1188 case WAN_CONNECTING:
1189 printk (KERN_INFO "%s: link connecting...\n",
1190 card->devname);
1191 break;
1192
1193 case WAN_DISCONNECTED:
1194 printk (KERN_INFO "%s: link disconnected!\n",
1195 card->devname);
1196 break;
1197 }
1198 card->wandev.state = state;
1199 }
1200 card->state_tick = jiffies;
1201}
1202
1203sdla_t * wanpipe_find_card (char *name)
1204{
1205 int cnt;
1206 for (cnt = 0; cnt < ncards; ++ cnt) {
1207 sdla_t* card = &card_array[cnt];
1208 if (!strcmp(card->devname,name))
1209 return card;
1210 }
1211 return NULL;
1212}
1213
1214sdla_t * wanpipe_find_card_num (int num)
1215{
1216 if (num < 1 || num > ncards)
1217 return NULL;
1218 num--;
1219 return &card_array[num];
1220}
1221
1222/*
1223 * @work_pointer:	work_struct to be queued; the caller should
1224 * 			already have run PREPARE_WORK() or
1225 *			INIT_WORK() on it
1226 */
1227void wanpipe_queue_work (struct work_struct *work_pointer)
1228{
1229 if (test_and_set_bit(1, (void*)&wanpipe_bh_critical))
1230 printk(KERN_INFO "CRITICAL IN QUEUING WORK\n");
1231
1232 queue_work(wanpipe_wq, work_pointer);
1233 clear_bit(1,(void*)&wanpipe_bh_critical);
1234}
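
/*
 * Minimal caller sketch (the work_struct and handler names below are
 * hypothetical, not part of this file). With the 2.6.12-era workqueue
 * API the caller prepares the work item once and then queues it:
 *
 *	static struct work_struct wanpipe_work;
 *
 *	INIT_WORK(&wanpipe_work, wanpipe_bh_handler, card);
 *	...
 *	wanpipe_queue_work(&wanpipe_work);
 */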
1235
1236void wakeup_sk_bh(struct net_device *dev)
1237{
1238 wanpipe_common_t *chan = dev->priv;
1239
1240 if (test_bit(0,&chan->common_critical))
1241 return;
1242
1243 if (chan->sk && chan->tx_timer){
1244 chan->tx_timer->expires=jiffies+1;
1245 add_timer(chan->tx_timer);
1246 }
1247}
1248
1249int change_dev_flags(struct net_device *dev, unsigned flags)
1250{
1251 struct ifreq if_info;
1252 mm_segment_t fs = get_fs();
1253 int err;
1254
1255 memset(&if_info, 0, sizeof(if_info));
1256 strcpy(if_info.ifr_name, dev->name);
1257 if_info.ifr_flags = flags;
1258
1259 set_fs(get_ds()); /* get user space block */
1260 err = devinet_ioctl(SIOCSIFFLAGS, &if_info);
1261 set_fs(fs);
1262
1263 return err;
1264}
1265
1266unsigned long get_ip_address(struct net_device *dev, int option)
1267{
1268
1269 struct in_ifaddr *ifaddr;
1270 struct in_device *in_dev;
1271
1272 if ((in_dev = __in_dev_get(dev)) == NULL){
1273 return 0;
1274 }
1275
1276 if ((ifaddr = in_dev->ifa_list)== NULL ){
1277 return 0;
1278 }
1279
1280 switch (option){
1281
1282 case WAN_LOCAL_IP:
1283 return ifaddr->ifa_local;
1284 break;
1285
1286 case WAN_POINTOPOINT_IP:
1287 return ifaddr->ifa_address;
1288 break;
1289
1290 case WAN_NETMASK_IP:
1291 return ifaddr->ifa_mask;
1292 break;
1293
1294 case WAN_BROADCAST_IP:
1295 return ifaddr->ifa_broadcast;
1296 break;
1297 default:
1298 return 0;
1299 }
1300
1301 return 0;
1302}
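
/*
 * Usage sketch: a protocol module can fetch the interface's addresses
 * for its own configuration, e.g.
 *
 *	unsigned long ip   = get_ip_address(dev, WAN_LOCAL_IP);
 *	unsigned long mask = get_ip_address(dev, WAN_NETMASK_IP);
 *
 * A return value of 0 means the device has no IPv4 address attached.
 */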
1303
1304void add_gateway(sdla_t *card, struct net_device *dev)
1305{
1306 mm_segment_t oldfs;
1307 struct rtentry route;
1308 int res;
1309
1310 memset((char*)&route,0,sizeof(struct rtentry));
1311
1312 ((struct sockaddr_in *)
1313 &(route.rt_dst))->sin_addr.s_addr = 0;
1314 ((struct sockaddr_in *)
1315 &(route.rt_dst))->sin_family = AF_INET;
1316
1317 ((struct sockaddr_in *)
1318 &(route.rt_genmask))->sin_addr.s_addr = 0;
1319 ((struct sockaddr_in *)
1320 &(route.rt_genmask)) ->sin_family = AF_INET;
1321
1322
1323 route.rt_flags = 0;
1324 route.rt_dev = dev->name;
1325
1326 oldfs = get_fs();
1327 set_fs(get_ds());
1328 res = ip_rt_ioctl(SIOCADDRT,&route);
1329 set_fs(oldfs);
1330
1331 if (res == 0){
1332 printk(KERN_INFO "%s: Gateway added for %s\n",
1333 card->devname,dev->name);
1334 }
1335
1336 return;
1337}
1338
1339MODULE_LICENSE("GPL");
1340
1341/****** End *********************************************************/
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
new file mode 100644
index 000000000000..5380ddfcd7d5
--- /dev/null
+++ b/drivers/net/wan/sealevel.c
@@ -0,0 +1,469 @@
1/*
2 * Sealevel Systems 4021 driver.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * (c) Copyright 1999, 2001 Alan Cox
10 * (c) Copyright 2001 Red Hat Inc.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/mm.h>
17#include <linux/net.h>
18#include <linux/skbuff.h>
19#include <linux/netdevice.h>
20#include <linux/if_arp.h>
21#include <linux/delay.h>
22#include <linux/ioport.h>
23#include <linux/init.h>
24#include <net/arp.h>
25
26#include <asm/io.h>
27#include <asm/dma.h>
28#include <asm/byteorder.h>
29#include <net/syncppp.h>
30#include "z85230.h"
31
32
33struct slvl_device
34{
35 void *if_ptr; /* General purpose pointer (used by SPPP) */
36 struct z8530_channel *chan;
37 struct ppp_device pppdev;
38 int channel;
39};
40
41
42struct slvl_board
43{
44 struct slvl_device *dev[2];
45 struct z8530_dev board;
46 int iobase;
47};
48
49/*
50 * Network driver support routines
51 */
52
53/*
54 * Frame receive. Simple for our card as we do sync ppp and there
55 * is no funny garbage involved
56 */
57
58static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
59{
60 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
61 skb_trim(skb, skb->len-2);
62 skb->protocol=htons(ETH_P_WAN_PPP);
63 skb->mac.raw=skb->data;
64 skb->dev=c->netdevice;
65 /*
66 * Send it to the PPP layer. We don't have time to process
67 * it right now.
68 */
69 netif_rx(skb);
70 c->netdevice->last_rx = jiffies;
71}
72
73/*
74 * We've been placed in the UP state
75 */
76
77static int sealevel_open(struct net_device *d)
78{
79 struct slvl_device *slvl=d->priv;
80 int err = -1;
81 int unit = slvl->channel;
82
83 /*
84 * Link layer up.
85 */
86
87 switch(unit)
88 {
89 case 0:
90 err=z8530_sync_dma_open(d, slvl->chan);
91 break;
92 case 1:
93 err=z8530_sync_open(d, slvl->chan);
94 break;
95 }
96
97 if(err)
98 return err;
99 /*
100 * Begin PPP
101 */
102 err=sppp_open(d);
103 if(err)
104 {
105 switch(unit)
106 {
107 case 0:
108 z8530_sync_dma_close(d, slvl->chan);
109 break;
110 case 1:
111 z8530_sync_close(d, slvl->chan);
112 break;
113 }
114 return err;
115 }
116
117 slvl->chan->rx_function=sealevel_input;
118
119 /*
120 * Go go go
121 */
122 netif_start_queue(d);
123 return 0;
124}
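
/*
 * Note on the switch above: channel 0 (chanA) is opened in DMA mode
 * while channel 1 (chanB) uses the interrupt-driven path, which matches
 * slvl_init() below assigning txdma/rxdma to chanA only.
 */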
125
126static int sealevel_close(struct net_device *d)
127{
128 struct slvl_device *slvl=d->priv;
129 int unit = slvl->channel;
130
131 /*
132 * Discard new frames
133 */
134
135 slvl->chan->rx_function=z8530_null_rx;
136
137 /*
138 * PPP off
139 */
140 sppp_close(d);
141 /*
142 * Link layer down
143 */
144
145 netif_stop_queue(d);
146
147 switch(unit)
148 {
149 case 0:
150 z8530_sync_dma_close(d, slvl->chan);
151 break;
152 case 1:
153 z8530_sync_close(d, slvl->chan);
154 break;
155 }
156 return 0;
157}
158
159static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
160{
161 /* struct slvl_device *slvl=d->priv;
162 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
163 return sppp_do_ioctl(d, ifr,cmd);
164}
165
166static struct net_device_stats *sealevel_get_stats(struct net_device *d)
167{
168 struct slvl_device *slvl=d->priv;
169 if(slvl)
170 return z8530_get_stats(slvl->chan);
171 else
172 return NULL;
173}
174
175/*
176 * Passed PPP frames, fire them downwind.
177 */
178
179static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)
180{
181 struct slvl_device *slvl=d->priv;
182 return z8530_queue_xmit(slvl->chan, skb);
183}
184
185static int sealevel_neigh_setup(struct neighbour *n)
186{
187 if (n->nud_state == NUD_NONE) {
188 n->ops = &arp_broken_ops;
189 n->output = n->ops->output;
190 }
191 return 0;
192}
193
194static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
195{
196 if (p->tbl->family == AF_INET) {
197 p->neigh_setup = sealevel_neigh_setup;
198 p->ucast_probes = 0;
199 p->mcast_probes = 0;
200 }
201 return 0;
202}
203
204static int sealevel_attach(struct net_device *dev)
205{
206 struct slvl_device *sv = dev->priv;
207 sppp_attach(&sv->pppdev);
208 return 0;
209}
210
211static void sealevel_detach(struct net_device *dev)
212{
213 sppp_detach(dev);
214}
215
216static void slvl_setup(struct net_device *d)
217{
218 d->open = sealevel_open;
219 d->stop = sealevel_close;
220 d->init = sealevel_attach;
221 d->uninit = sealevel_detach;
222 d->hard_start_xmit = sealevel_queue_xmit;
223 d->get_stats = sealevel_get_stats;
224 d->set_multicast_list = NULL;
225 d->do_ioctl = sealevel_ioctl;
226 d->neigh_setup = sealevel_neigh_setup_dev;
227 d->set_mac_address = NULL;
228
229}
230
231static inline struct slvl_device *slvl_alloc(int iobase, int irq)
232{
233 struct net_device *d;
234 struct slvl_device *sv;
235
236 d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d",
237 slvl_setup);
238
239 if (!d)
240 return NULL;
241
242 sv = d->priv;
243 sv->if_ptr = &sv->pppdev;
244 sv->pppdev.dev = d;
245 d->base_addr = iobase;
246 d->irq = irq;
247
248 return sv;
249}
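
/*
 * Note on the allocation above: alloc_netdev() reserves
 * sizeof(struct slvl_device) of private space behind the net_device,
 * so dev->priv and the slvl_device share one allocation and are
 * released together by free_netdev() -- see the error unwinding in
 * slvl_init() below, which never kfree()s the slvl_device separately.
 */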
250
251
252/*
253 * Allocate and setup Sealevel board.
254 */
255
256static __init struct slvl_board *slvl_init(int iobase, int irq,
257 int txdma, int rxdma, int slow)
258{
259 struct z8530_dev *dev;
260 struct slvl_board *b;
261
262 /*
263 * Get the needed I/O space
264 */
265
266 if(!request_region(iobase, 8, "Sealevel 4021"))
267 {
268 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase);
269 return NULL;
270 }
271
272 b = kmalloc(sizeof(struct slvl_board), GFP_KERNEL);
273 if(!b)
274 goto fail3;
275
276 memset(b, 0, sizeof(*b));
277 if (!(b->dev[0]= slvl_alloc(iobase, irq)))
278 goto fail2;
279
280 b->dev[0]->chan = &b->board.chanA;
281 b->dev[0]->channel = 0;
282
283 if (!(b->dev[1] = slvl_alloc(iobase, irq)))
284 goto fail1_0;
285
286 b->dev[1]->chan = &b->board.chanB;
287 b->dev[1]->channel = 1;
288
289 dev = &b->board;
290
291 /*
292 * Stuff in the I/O addressing
293 */
294
295 dev->active = 0;
296
297 b->iobase = iobase;
298
299 /*
300 * Select 8530 delays for the old board
301 */
302
303 if(slow)
304 iobase |= Z8530_PORT_SLEEP;
305
306 dev->chanA.ctrlio=iobase+1;
307 dev->chanA.dataio=iobase;
308 dev->chanB.ctrlio=iobase+3;
309 dev->chanB.dataio=iobase+2;
310
311 dev->chanA.irqs=&z8530_nop;
312 dev->chanB.irqs=&z8530_nop;
313
314 /*
315 * Assert DTR enable DMA
316 */
317
318 outb(3|(1<<7), b->iobase+4);
319
320
321 /* We want a fast IRQ for this device. Actually we'd like an even faster
322 IRQ ;) - This is one driver RtLinux is made for */
323
324 if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT, "SeaLevel", dev)<0)
325 {
326 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
327 goto fail1_1;
328 }
329
330 dev->irq=irq;
331 dev->chanA.private=&b->dev[0];
332 dev->chanB.private=&b->dev[1];
333 dev->chanA.netdevice=b->dev[0]->pppdev.dev;
334 dev->chanB.netdevice=b->dev[1]->pppdev.dev;
335 dev->chanA.dev=dev;
336 dev->chanB.dev=dev;
337
338 dev->chanA.txdma=3;
339 dev->chanA.rxdma=1;
340 if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0)
341 goto fail;
342
343 if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0)
344 goto dmafail;
345
346 disable_irq(irq);
347
348 /*
349 * Begin normal initialise
350 */
351
352 if(z8530_init(dev)!=0)
353 {
354 printk(KERN_ERR "Z8530 series device not found.\n");
355 enable_irq(irq);
356 goto dmafail2;
357 }
358 if(dev->type==Z85C30)
359 {
360 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
361 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
362 }
363 else
364 {
365 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
366 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
367 }
368
369 /*
370 * Now we can take the IRQ
371 */
372
373 enable_irq(irq);
374
375 if (register_netdev(b->dev[0]->pppdev.dev))
376 goto dmafail2;
377
378 if (register_netdev(b->dev[1]->pppdev.dev))
379 goto fail_unit;
380
381 z8530_describe(dev, "I/O", iobase);
382 dev->active=1;
383 return b;
384
385fail_unit:
386 unregister_netdev(b->dev[0]->pppdev.dev);
387
388dmafail2:
389 free_dma(dev->chanA.rxdma);
390dmafail:
391 free_dma(dev->chanA.txdma);
392fail:
393 free_irq(irq, dev);
394fail1_1:
395 free_netdev(b->dev[1]->pppdev.dev);
396fail1_0:
397 free_netdev(b->dev[0]->pppdev.dev);
398fail2:
399 kfree(b);
400fail3:
401 release_region(iobase,8);
402 return NULL;
403}
404
405static void __exit slvl_shutdown(struct slvl_board *b)
406{
407 int u;
408
409 z8530_shutdown(&b->board);
410
411 for(u=0; u<2; u++)
412 {
413 struct net_device *d = b->dev[u]->pppdev.dev;
414 unregister_netdev(d);
415 free_netdev(d);
416 }
417
418 free_irq(b->board.irq, &b->board);
419 free_dma(b->board.chanA.rxdma);
420 free_dma(b->board.chanA.txdma);
421 /* DMA off on the card, drop DTR */
422 outb(0, b->iobase);
423 release_region(b->iobase, 8);
424 kfree(b);
425}
426
427
428static int io=0x238;
429static int txdma=1;
430static int rxdma=3;
431static int irq=5;
432static int slow=0;
433
434module_param(io, int, 0);
435MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
436module_param(txdma, int, 0);
437MODULE_PARM_DESC(txdma, "Transmit DMA channel");
438module_param(rxdma, int, 0);
439MODULE_PARM_DESC(rxdma, "Receive DMA channel");
440module_param(irq, int, 0);
441MODULE_PARM_DESC(irq, "The interrupt line setting for the SeaLevel card");
442module_param(slow, bool, 0);
443MODULE_PARM_DESC(slow, "Set this for an older Sealevel card such as the 4012");
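
/*
 * Example module load (a sketch; the values shown are the defaults
 * above and must match the actual card's settings):
 *
 *	modprobe sealevel io=0x238 irq=5 txdma=1 rxdma=3 slow=0
 */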
444
445MODULE_AUTHOR("Alan Cox");
446MODULE_LICENSE("GPL");
447MODULE_DESCRIPTION("Modular driver for the SeaLevel 4021");
448
449static struct slvl_board *slvl_unit;
450
451static int __init slvl_init_module(void)
452{
453#ifdef MODULE
454 printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n");
455 printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n");
456#endif
457 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
458
459 return slvl_unit ? 0 : -ENODEV;
460}
461
462static void __exit slvl_cleanup_module(void)
463{
464 if(slvl_unit)
465 slvl_shutdown(slvl_unit);
466}
467
468module_init(slvl_init_module);
469module_exit(slvl_cleanup_module);
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
new file mode 100644
index 000000000000..84b65c60c799
--- /dev/null
+++ b/drivers/net/wan/syncppp.c
@@ -0,0 +1,1488 @@
1/*
2 * NET3: A (fairly minimal) implementation of synchronous PPP for Linux
3 * as well as a CISCO HDLC implementation. See the copyright
4 * message below for the original source.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the license, or (at your option) any later version.
10 *
11 * Note however. This code is also used in a different form by FreeBSD.
12 * Therefore when making any non OS specific change please consider
13 * contributing it back to the original author under the terms
14 * below in addition.
15 * -- Alan
16 *
17 * Port for Linux-2.1 by Jan "Yenya" Kasprzak <kas@fi.muni.cz>
18 */
19
20/*
21 * Synchronous PPP/Cisco link level subroutines.
22 * Keepalive protocol implemented in both Cisco and PPP modes.
23 *
24 * Copyright (C) 1994 Cronyx Ltd.
25 * Author: Serge Vakulenko, <vak@zebub.msk.su>
26 *
27 * This software is distributed with NO WARRANTIES, not even the implied
28 * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Authors grant any other persons or organisations permission to use
31 * or modify this software as long as this message is kept with the software,
32 * all derivative works or modified versions.
33 *
34 * Version 1.9, Wed Oct 4 18:58:15 MSK 1995
35 *
36 * $Id: syncppp.c,v 1.18 2000/04/11 05:25:31 asj Exp $
37 */
38#undef DEBUG
39
40#include <linux/config.h>
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/errno.h>
44#include <linux/init.h>
45#include <linux/if_arp.h>
46#include <linux/skbuff.h>
47#include <linux/route.h>
48#include <linux/netdevice.h>
49#include <linux/inetdevice.h>
50#include <linux/random.h>
51#include <linux/pkt_sched.h>
52#include <linux/spinlock.h>
53#include <linux/rcupdate.h>
54
55#include <net/syncppp.h>
56
57#include <asm/byteorder.h>
58#include <asm/uaccess.h>
59
60#define MAXALIVECNT 6 /* max. alive packets */
61
62#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
63#define PPP_UI 0x03 /* Unnumbered Information */
64#define PPP_IP 0x0021 /* Internet Protocol */
65#define PPP_ISO 0x0023 /* ISO OSI Protocol */
66#define PPP_XNS 0x0025 /* Xerox NS Protocol */
67#define PPP_IPX 0x002b /* Novell IPX Protocol */
68#define PPP_LCP 0xc021 /* Link Control Protocol */
69#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
70
71#define LCP_CONF_REQ 1 /* PPP LCP configure request */
72#define LCP_CONF_ACK 2 /* PPP LCP configure acknowledge */
73#define LCP_CONF_NAK 3 /* PPP LCP configure negative ack */
74#define LCP_CONF_REJ 4 /* PPP LCP configure reject */
75#define LCP_TERM_REQ 5 /* PPP LCP terminate request */
76#define LCP_TERM_ACK 6 /* PPP LCP terminate acknowledge */
77#define LCP_CODE_REJ 7 /* PPP LCP code reject */
78#define LCP_PROTO_REJ 8 /* PPP LCP protocol reject */
79#define LCP_ECHO_REQ 9 /* PPP LCP echo request */
80#define LCP_ECHO_REPLY 10 /* PPP LCP echo reply */
81#define LCP_DISC_REQ 11 /* PPP LCP discard request */
82
83#define LCP_OPT_MRU 1 /* maximum receive unit */
84#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
85#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
86#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
87#define LCP_OPT_MAGIC 5 /* magic number */
88#define LCP_OPT_RESERVED 6 /* reserved */
89#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
90#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
91
92#define IPCP_CONF_REQ LCP_CONF_REQ /* PPP IPCP configure request */
93#define IPCP_CONF_ACK LCP_CONF_ACK /* PPP IPCP configure acknowledge */
94#define IPCP_CONF_NAK LCP_CONF_NAK /* PPP IPCP configure negative ack */
95#define IPCP_CONF_REJ LCP_CONF_REJ /* PPP IPCP configure reject */
96#define IPCP_TERM_REQ LCP_TERM_REQ /* PPP IPCP terminate request */
97#define IPCP_TERM_ACK LCP_TERM_ACK /* PPP IPCP terminate acknowledge */
98#define IPCP_CODE_REJ LCP_CODE_REJ /* PPP IPCP code reject */
99
100#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
101#define CISCO_UNICAST 0x0f /* Cisco unicast address */
102#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
103#define CISCO_ADDR_REQ 0 /* Cisco address request */
104#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
105#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
106
107struct ppp_header {
108 u8 address;
109 u8 control;
110 u16 protocol;
111};
112#define PPP_HEADER_LEN sizeof (struct ppp_header)
113
114struct lcp_header {
115 u8 type;
116 u8 ident;
117 u16 len;
118};
119#define LCP_HEADER_LEN sizeof (struct lcp_header)
120
121struct cisco_packet {
122 u32 type;
123 u32 par1;
124 u32 par2;
125 u16 rel;
126 u16 time0;
127 u16 time1;
128};
129#define CISCO_PACKET_LEN 18
130#define CISCO_BIG_PACKET_LEN 20
131
132static struct sppp *spppq;
133static struct timer_list sppp_keepalive_timer;
134static DEFINE_SPINLOCK(spppq_lock);
135
136/* global xmit queue for sending packets while spinlock is held */
137static struct sk_buff_head tx_queue;
138
139static void sppp_keepalive (unsigned long dummy);
140static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
141 u8 ident, u16 len, void *data);
142static void sppp_cisco_send (struct sppp *sp, int type, long par1, long par2);
143static void sppp_lcp_input (struct sppp *sp, struct sk_buff *m);
144static void sppp_cisco_input (struct sppp *sp, struct sk_buff *m);
145static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *m);
146static void sppp_lcp_open (struct sppp *sp);
147static void sppp_ipcp_open (struct sppp *sp);
148static int sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
149 int len, u32 *magic);
150static void sppp_cp_timeout (unsigned long arg);
151static char *sppp_lcp_type_name (u8 type);
152static char *sppp_ipcp_type_name (u8 type);
153static void sppp_print_bytes (u8 *p, u16 len);
154
155static int debug;
156
157/* Flush global outgoing packet queue to dev_queue_xmit().
158 *
159 * dev_queue_xmit() must be called with interrupts enabled
160 * which means it can't be called with spinlocks held.
161 * If a packet needs to be sent while a spinlock is held,
162 * then put the packet into tx_queue, and call sppp_flush_xmit()
163 * after spinlock is released.
164 */
165static void sppp_flush_xmit(void)
166{
167 struct sk_buff *skb;
168 while ((skb = skb_dequeue(&tx_queue)) != NULL)
169 dev_queue_xmit(skb);
170}
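
/*
 * A minimal sketch of the intended pattern (illustration only): while
 * holding the lock, queue the skb instead of transmitting it, then
 * flush once the lock has been dropped.
 *
 *	spin_lock_irqsave(&sp->lock, flags);
 *	skb_queue_tail(&tx_queue, skb);
 *	spin_unlock_irqrestore(&sp->lock, flags);
 *	sppp_flush_xmit();
 */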
171
172/*
173 * Interface down stub
174 */
175
176static void if_down(struct net_device *dev)
177{
178 struct sppp *sp = (struct sppp *)sppp_of(dev);
179
180 sp->pp_link_state=SPPP_LINK_DOWN;
181}
182
183/*
184 * Timeout routine activations.
185 */
186
187static void sppp_set_timeout(struct sppp *p,int s)
188{
189 if (! (p->pp_flags & PP_TIMO))
190 {
191 init_timer(&p->pp_timer);
192 p->pp_timer.function=sppp_cp_timeout;
193 p->pp_timer.expires=jiffies+s*HZ;
194 p->pp_timer.data=(unsigned long)p;
195 p->pp_flags |= PP_TIMO;
196 add_timer(&p->pp_timer);
197 }
198}
199
200static void sppp_clear_timeout(struct sppp *p)
201{
202 if (p->pp_flags & PP_TIMO)
203 {
204 del_timer(&p->pp_timer);
205 p->pp_flags &= ~PP_TIMO;
206 }
207}
208
209/**
210 * sppp_input - receive and process a WAN PPP frame
211 * @skb: The buffer to process
212 * @dev: The device it arrived on
213 *
214 * This can be called directly by cards that do not have
215 * timing constraints but is normally called from the network layer
216 * after interrupt servicing to process frames queued via netif_rx().
217 *
218 * We process the options in the card. If the frame is destined for
219 * the protocol stacks then it requeues the frame for the upper level
220 *	protocol. If it is a control frame it is processed and discarded
221 * here.
222 */
223
224void sppp_input (struct net_device *dev, struct sk_buff *skb)
225{
226 struct ppp_header *h;
227 struct sppp *sp = (struct sppp *)sppp_of(dev);
228 unsigned long flags;
229
230 skb->dev=dev;
231 skb->mac.raw=skb->data;
232
233 if (dev->flags & IFF_RUNNING)
234 {
235 /* Count received bytes, add FCS and one flag */
236 sp->ibytes+= skb->len + 3;
237 sp->ipkts++;
238 }
239
240 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
241		/* Packet too small, drop it. */
242 if (sp->pp_flags & PP_DEBUG)
243 printk (KERN_DEBUG "%s: input packet is too small, %d bytes\n",
244 dev->name, skb->len);
245 kfree_skb(skb);
246 return;
247 }
248
249 /* Get PPP header. */
250 h = (struct ppp_header *)skb->data;
251 skb_pull(skb,sizeof(struct ppp_header));
252
253 spin_lock_irqsave(&sp->lock, flags);
254
255 switch (h->address) {
256 default: /* Invalid PPP packet. */
257 goto invalid;
258 case PPP_ALLSTATIONS:
259 if (h->control != PPP_UI)
260 goto invalid;
261 if (sp->pp_flags & PP_CISCO) {
262 if (sp->pp_flags & PP_DEBUG)
263 printk (KERN_WARNING "%s: PPP packet in Cisco mode <0x%x 0x%x 0x%x>\n",
264 dev->name,
265 h->address, h->control, ntohs (h->protocol));
266 goto drop;
267 }
268 switch (ntohs (h->protocol)) {
269 default:
270 if (sp->lcp.state == LCP_STATE_OPENED)
271 sppp_cp_send (sp, PPP_LCP, LCP_PROTO_REJ,
272 ++sp->pp_seq, skb->len + 2,
273 &h->protocol);
274 if (sp->pp_flags & PP_DEBUG)
275 printk (KERN_WARNING "%s: invalid input protocol <0x%x 0x%x 0x%x>\n",
276 dev->name,
277 h->address, h->control, ntohs (h->protocol));
278 goto drop;
279 case PPP_LCP:
280 sppp_lcp_input (sp, skb);
281 goto drop;
282 case PPP_IPCP:
283 if (sp->lcp.state == LCP_STATE_OPENED)
284 sppp_ipcp_input (sp, skb);
285 else
286 printk(KERN_DEBUG "IPCP when still waiting LCP finish.\n");
287 goto drop;
288 case PPP_IP:
289 if (sp->ipcp.state == IPCP_STATE_OPENED) {
290 if(sp->pp_flags&PP_DEBUG)
291 printk(KERN_DEBUG "Yow an IP frame.\n");
292 skb->protocol=htons(ETH_P_IP);
293 netif_rx(skb);
294 dev->last_rx = jiffies;
295 goto done;
296 }
297 break;
298#ifdef IPX
299 case PPP_IPX:
300 /* IPX IPXCP not implemented yet */
301 if (sp->lcp.state == LCP_STATE_OPENED) {
302 skb->protocol=htons(ETH_P_IPX);
303 netif_rx(skb);
304 dev->last_rx = jiffies;
305 goto done;
306 }
307 break;
308#endif
309 }
310 break;
311 case CISCO_MULTICAST:
312 case CISCO_UNICAST:
313 /* Don't check the control field here (RFC 1547). */
314 if (! (sp->pp_flags & PP_CISCO)) {
315 if (sp->pp_flags & PP_DEBUG)
316 printk (KERN_WARNING "%s: Cisco packet in PPP mode <0x%x 0x%x 0x%x>\n",
317 dev->name,
318 h->address, h->control, ntohs (h->protocol));
319 goto drop;
320 }
321 switch (ntohs (h->protocol)) {
322 default:
323 goto invalid;
324 case CISCO_KEEPALIVE:
325 sppp_cisco_input (sp, skb);
326 goto drop;
327#ifdef CONFIG_INET
328 case ETH_P_IP:
329 skb->protocol=htons(ETH_P_IP);
330 netif_rx(skb);
331 dev->last_rx = jiffies;
332 goto done;
333#endif
334#ifdef CONFIG_IPX
335 case ETH_P_IPX:
336 skb->protocol=htons(ETH_P_IPX);
337 netif_rx(skb);
338 dev->last_rx = jiffies;
339 goto done;
340#endif
341 }
342 break;
343 }
344 goto drop;
345
346invalid:
347 if (sp->pp_flags & PP_DEBUG)
348 printk (KERN_WARNING "%s: invalid input packet <0x%x 0x%x 0x%x>\n",
349 dev->name, h->address, h->control, ntohs (h->protocol));
350drop:
351 kfree_skb(skb);
352done:
353 spin_unlock_irqrestore(&sp->lock, flags);
354 sppp_flush_xmit();
355 return;
356}
357
358EXPORT_SYMBOL(sppp_input);
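
/*
 * Minimal caller sketch (the driver function below is hypothetical):
 * a card driver without tight timing constraints can hand a received,
 * fully assembled frame straight to the PPP/Cisco state machine:
 *
 *	static void mycard_rx_frame(struct net_device *dev,
 *				    struct sk_buff *skb)
 *	{
 *		sppp_input(dev, skb);
 *	}
 */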
359
360/*
361 * Handle transmit packets.
362 */
363
364static int sppp_hard_header(struct sk_buff *skb, struct net_device *dev, __u16 type,
365 void *daddr, void *saddr, unsigned int len)
366{
367 struct sppp *sp = (struct sppp *)sppp_of(dev);
368 struct ppp_header *h;
369 skb_push(skb,sizeof(struct ppp_header));
370 h=(struct ppp_header *)skb->data;
371 if(sp->pp_flags&PP_CISCO)
372 {
373 h->address = CISCO_UNICAST;
374 h->control = 0;
375 }
376 else
377 {
378 h->address = PPP_ALLSTATIONS;
379 h->control = PPP_UI;
380 }
381 if(sp->pp_flags & PP_CISCO)
382 {
383 h->protocol = htons(type);
384 }
385 else switch(type)
386 {
387 case ETH_P_IP:
388 h->protocol = htons(PPP_IP);
389 break;
390 case ETH_P_IPX:
391 h->protocol = htons(PPP_IPX);
392 break;
393 }
394 return sizeof(struct ppp_header);
395}
396
397static int sppp_rebuild_header(struct sk_buff *skb)
398{
399 return 0;
400}
401
402/*
403 * Send keepalive packets, every 10 seconds.
404 */
405
406static void sppp_keepalive (unsigned long dummy)
407{
408 struct sppp *sp;
409 unsigned long flags;
410
411 spin_lock_irqsave(&spppq_lock, flags);
412
413 for (sp=spppq; sp; sp=sp->pp_next)
414 {
415 struct net_device *dev = sp->pp_if;
416
417 /* Keepalive mode disabled or channel down? */
418 if (! (sp->pp_flags & PP_KEEPALIVE) ||
419 ! (dev->flags & IFF_UP))
420 continue;
421
422 spin_lock(&sp->lock);
423
424 /* No keepalive in PPP mode if LCP not opened yet. */
425 if (! (sp->pp_flags & PP_CISCO) &&
426 sp->lcp.state != LCP_STATE_OPENED) {
427 spin_unlock(&sp->lock);
428 continue;
429 }
430
431 if (sp->pp_alivecnt == MAXALIVECNT) {
432			/* No keepalive packets received. Stop the interface. */
433 printk (KERN_WARNING "%s: protocol down\n", dev->name);
434 if_down (dev);
435 if (! (sp->pp_flags & PP_CISCO)) {
436 /* Shut down the PPP link. */
437 sp->lcp.magic = jiffies;
438 sp->lcp.state = LCP_STATE_CLOSED;
439 sp->ipcp.state = IPCP_STATE_CLOSED;
440 sppp_clear_timeout (sp);
441 /* Initiate negotiation. */
442 sppp_lcp_open (sp);
443 }
444 }
445 if (sp->pp_alivecnt <= MAXALIVECNT)
446 ++sp->pp_alivecnt;
447 if (sp->pp_flags & PP_CISCO)
448 sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
449 sp->pp_rseq);
450 else if (sp->lcp.state == LCP_STATE_OPENED) {
451 long nmagic = htonl (sp->lcp.magic);
452 sp->lcp.echoid = ++sp->pp_seq;
453 sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REQ,
454 sp->lcp.echoid, 4, &nmagic);
455 }
456
457 spin_unlock(&sp->lock);
458 }
459 spin_unlock_irqrestore(&spppq_lock, flags);
460 sppp_flush_xmit();
461 sppp_keepalive_timer.expires=jiffies+10*HZ;
462 add_timer(&sppp_keepalive_timer);
463}
464
465/*
466 * Handle incoming PPP Link Control Protocol packets.
467 */
468
469static void sppp_lcp_input (struct sppp *sp, struct sk_buff *skb)
470{
471 struct lcp_header *h;
472 struct net_device *dev = sp->pp_if;
473 int len = skb->len;
474 u8 *p, opt[6];
475 u32 rmagic;
476
477 if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
478 if (sp->pp_flags & PP_DEBUG)
479 printk (KERN_WARNING "%s: invalid lcp packet length: %d bytes\n",
480 dev->name, len);
481 return;
482 }
483 h = (struct lcp_header *)skb->data;
484	skb_pull(skb, sizeof(struct lcp_header));
485
486 if (sp->pp_flags & PP_DEBUG)
487 {
488 char state = '?';
489 switch (sp->lcp.state) {
490 case LCP_STATE_CLOSED: state = 'C'; break;
491 case LCP_STATE_ACK_RCVD: state = 'R'; break;
492 case LCP_STATE_ACK_SENT: state = 'S'; break;
493 case LCP_STATE_OPENED: state = 'O'; break;
494 }
495 printk (KERN_WARNING "%s: lcp input(%c): %d bytes <%s id=%xh len=%xh",
496 dev->name, state, len,
497 sppp_lcp_type_name (h->type), h->ident, ntohs (h->len));
498 if (len > 4)
499 sppp_print_bytes ((u8*) (h+1), len-4);
500 printk (">\n");
501 }
502 if (len > ntohs (h->len))
503 len = ntohs (h->len);
504 switch (h->type) {
505 default:
506 /* Unknown packet type -- send Code-Reject packet. */
507 sppp_cp_send (sp, PPP_LCP, LCP_CODE_REJ, ++sp->pp_seq,
508 skb->len, h);
509 break;
510 case LCP_CONF_REQ:
511 if (len < 4) {
512 if (sp->pp_flags & PP_DEBUG)
513 printk (KERN_DEBUG"%s: invalid lcp configure request packet length: %d bytes\n",
514 dev->name, len);
515 break;
516 }
517 if (len>4 && !sppp_lcp_conf_parse_options (sp, h, len, &rmagic))
518 goto badreq;
519 if (rmagic == sp->lcp.magic) {
520 /* Local and remote magics equal -- loopback? */
521 if (sp->pp_loopcnt >= MAXALIVECNT*5) {
522 printk (KERN_WARNING "%s: loopback\n",
523 dev->name);
524 sp->pp_loopcnt = 0;
525 if (dev->flags & IFF_UP) {
526 if_down (dev);
527 }
528 } else if (sp->pp_flags & PP_DEBUG)
529 printk (KERN_DEBUG "%s: conf req: magic glitch\n",
530 dev->name);
531 ++sp->pp_loopcnt;
532
533 /* MUST send Conf-Nack packet. */
534 rmagic = ~sp->lcp.magic;
535 opt[0] = LCP_OPT_MAGIC;
536 opt[1] = sizeof (opt);
537 opt[2] = rmagic >> 24;
538 opt[3] = rmagic >> 16;
539 opt[4] = rmagic >> 8;
540 opt[5] = rmagic;
541 sppp_cp_send (sp, PPP_LCP, LCP_CONF_NAK,
542 h->ident, sizeof (opt), &opt);
543badreq:
544 switch (sp->lcp.state) {
545 case LCP_STATE_OPENED:
546 /* Initiate renegotiation. */
547 sppp_lcp_open (sp);
548 /* fall through... */
549 case LCP_STATE_ACK_SENT:
550 /* Go to closed state. */
551 sp->lcp.state = LCP_STATE_CLOSED;
552 sp->ipcp.state = IPCP_STATE_CLOSED;
553 }
554 break;
555 }
556 /* Send Configure-Ack packet. */
557 sp->pp_loopcnt = 0;
558 if (sp->lcp.state != LCP_STATE_OPENED) {
559 sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
560 h->ident, len-4, h+1);
561 }
562 /* Change the state. */
563 switch (sp->lcp.state) {
564 case LCP_STATE_CLOSED:
565 sp->lcp.state = LCP_STATE_ACK_SENT;
566 break;
567 case LCP_STATE_ACK_RCVD:
568 sp->lcp.state = LCP_STATE_OPENED;
569 sppp_ipcp_open (sp);
570 break;
571 case LCP_STATE_OPENED:
572 /* Remote magic changed -- close session. */
573 sp->lcp.state = LCP_STATE_CLOSED;
574 sp->ipcp.state = IPCP_STATE_CLOSED;
575 /* Initiate renegotiation. */
576 sppp_lcp_open (sp);
577 /* Send ACK after our REQ in attempt to break loop */
578 sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
579 h->ident, len-4, h+1);
580 sp->lcp.state = LCP_STATE_ACK_SENT;
581 break;
582 }
583 break;
584 case LCP_CONF_ACK:
585 if (h->ident != sp->lcp.confid)
586 break;
587 sppp_clear_timeout (sp);
588 if ((sp->pp_link_state != SPPP_LINK_UP) &&
589 (dev->flags & IFF_UP)) {
590 /* Coming out of loopback mode. */
591 sp->pp_link_state=SPPP_LINK_UP;
592 printk (KERN_INFO "%s: protocol up\n", dev->name);
593 }
594 switch (sp->lcp.state) {
595 case LCP_STATE_CLOSED:
596 sp->lcp.state = LCP_STATE_ACK_RCVD;
597 sppp_set_timeout (sp, 5);
598 break;
599 case LCP_STATE_ACK_SENT:
600 sp->lcp.state = LCP_STATE_OPENED;
601 sppp_ipcp_open (sp);
602 break;
603 }
604 break;
605 case LCP_CONF_NAK:
606 if (h->ident != sp->lcp.confid)
607 break;
608 p = (u8*) (h+1);
609 if (len>=10 && p[0] == LCP_OPT_MAGIC && p[1] >= 4) {
610 rmagic = (u32)p[2] << 24 |
611 (u32)p[3] << 16 | p[4] << 8 | p[5];
612 if (rmagic == ~sp->lcp.magic) {
613 int newmagic;
614 if (sp->pp_flags & PP_DEBUG)
615 printk (KERN_DEBUG "%s: conf nak: magic glitch\n",
616 dev->name);
617 get_random_bytes(&newmagic, sizeof(newmagic));
618 sp->lcp.magic += newmagic;
619 } else
620 sp->lcp.magic = rmagic;
621 }
622 if (sp->lcp.state != LCP_STATE_ACK_SENT) {
623 /* Go to closed state. */
624 sp->lcp.state = LCP_STATE_CLOSED;
625 sp->ipcp.state = IPCP_STATE_CLOSED;
626 }
627 /* The link will be renegotiated after timeout,
628 * to avoid endless req-nack loop. */
629 sppp_clear_timeout (sp);
630 sppp_set_timeout (sp, 2);
631 break;
632 case LCP_CONF_REJ:
633 if (h->ident != sp->lcp.confid)
634 break;
635 sppp_clear_timeout (sp);
636 /* Initiate renegotiation. */
637 sppp_lcp_open (sp);
638 if (sp->lcp.state != LCP_STATE_ACK_SENT) {
639 /* Go to closed state. */
640 sp->lcp.state = LCP_STATE_CLOSED;
641 sp->ipcp.state = IPCP_STATE_CLOSED;
642 }
643 break;
644 case LCP_TERM_REQ:
645 sppp_clear_timeout (sp);
646 /* Send Terminate-Ack packet. */
647 sppp_cp_send (sp, PPP_LCP, LCP_TERM_ACK, h->ident, 0, NULL);
648 /* Go to closed state. */
649 sp->lcp.state = LCP_STATE_CLOSED;
650 sp->ipcp.state = IPCP_STATE_CLOSED;
651 /* Initiate renegotiation. */
652 sppp_lcp_open (sp);
653 break;
654 case LCP_TERM_ACK:
655 case LCP_CODE_REJ:
656 case LCP_PROTO_REJ:
657 /* Ignore for now. */
658 break;
659 case LCP_DISC_REQ:
660 /* Discard the packet. */
661 break;
662 case LCP_ECHO_REQ:
663 if (sp->lcp.state != LCP_STATE_OPENED)
664 break;
665 if (len < 8) {
666 if (sp->pp_flags & PP_DEBUG)
667 printk (KERN_WARNING "%s: invalid lcp echo request packet length: %d bytes\n",
668 dev->name, len);
669 break;
670 }
671 if (ntohl (*(long*)(h+1)) == sp->lcp.magic) {
672 /* Line loopback mode detected. */
673 printk (KERN_WARNING "%s: loopback\n", dev->name);
674 if_down (dev);
675
676 /* Shut down the PPP link. */
677 sp->lcp.state = LCP_STATE_CLOSED;
678 sp->ipcp.state = IPCP_STATE_CLOSED;
679 sppp_clear_timeout (sp);
680 /* Initiate negotiation. */
681 sppp_lcp_open (sp);
682 break;
683 }
684 *(long*)(h+1) = htonl (sp->lcp.magic);
685 sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REPLY, h->ident, len-4, h+1);
686 break;
687 case LCP_ECHO_REPLY:
688 if (h->ident != sp->lcp.echoid)
689 break;
690 if (len < 8) {
691 if (sp->pp_flags & PP_DEBUG)
692 printk (KERN_WARNING "%s: invalid lcp echo reply packet length: %d bytes\n",
693 dev->name, len);
694 break;
695 }
696 if (ntohl (*(long*)(h+1)) != sp->lcp.magic)
697 sp->pp_alivecnt = 0;
698 break;
699 }
700}
701
702/*
703 * Handle incoming Cisco keepalive protocol packets.
704 */
705
706static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
707{
708 struct cisco_packet *h;
709 struct net_device *dev = sp->pp_if;
710
711 if (!pskb_may_pull(skb, sizeof(struct cisco_packet))
712 || (skb->len != CISCO_PACKET_LEN
713 && skb->len != CISCO_BIG_PACKET_LEN)) {
714 if (sp->pp_flags & PP_DEBUG)
715 printk (KERN_WARNING "%s: invalid cisco packet length: %d bytes\n",
716 dev->name, skb->len);
717 return;
718 }
719 h = (struct cisco_packet *)skb->data;
720	skb_pull(skb, sizeof(struct cisco_packet));
721 if (sp->pp_flags & PP_DEBUG)
722 printk (KERN_WARNING "%s: cisco input: %d bytes <%xh %xh %xh %xh %xh-%xh>\n",
723 dev->name, skb->len,
724 ntohl (h->type), h->par1, h->par2, h->rel,
725 h->time0, h->time1);
726 switch (ntohl (h->type)) {
727 default:
728 if (sp->pp_flags & PP_DEBUG)
729 printk (KERN_WARNING "%s: unknown cisco packet type: 0x%x\n",
730 dev->name, ntohl (h->type));
731 break;
732 case CISCO_ADDR_REPLY:
733 /* Reply on address request, ignore */
734 break;
735 case CISCO_KEEPALIVE_REQ:
736 sp->pp_alivecnt = 0;
737 sp->pp_rseq = ntohl (h->par1);
738 if (sp->pp_seq == sp->pp_rseq) {
739 /* Local and remote sequence numbers are equal.
740 * Probably, the line is in loopback mode. */
741 int newseq;
742 if (sp->pp_loopcnt >= MAXALIVECNT) {
743 printk (KERN_WARNING "%s: loopback\n",
744 dev->name);
745 sp->pp_loopcnt = 0;
746 if (dev->flags & IFF_UP) {
747 if_down (dev);
748 }
749 }
750 ++sp->pp_loopcnt;
751
752 /* Generate new local sequence number */
753 get_random_bytes(&newseq, sizeof(newseq));
754 sp->pp_seq ^= newseq;
755 break;
756 }
757 sp->pp_loopcnt = 0;
758 if (sp->pp_link_state==SPPP_LINK_DOWN &&
759 (dev->flags & IFF_UP)) {
760 sp->pp_link_state=SPPP_LINK_UP;
761 printk (KERN_INFO "%s: protocol up\n", dev->name);
762 }
763 break;
764 case CISCO_ADDR_REQ:
765 /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
766 {
767 struct in_device *in_dev;
768 struct in_ifaddr *ifa;
769 u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */
770#ifdef CONFIG_INET
771 rcu_read_lock();
772 if ((in_dev = __in_dev_get(dev)) != NULL)
773 {
774 for (ifa=in_dev->ifa_list; ifa != NULL;
775 ifa=ifa->ifa_next) {
776 if (strcmp(dev->name, ifa->ifa_label) == 0)
777 {
778 addr = ifa->ifa_local;
779 mask = ifa->ifa_mask;
780 break;
781 }
782 }
783 }
784 rcu_read_unlock();
785#endif
786 /* I hope both addr and mask are in the net order */
787 sppp_cisco_send (sp, CISCO_ADDR_REPLY, addr, mask);
788 break;
789 }
790 }
791}
792
793
794/*
795 * Send PPP LCP packet.
796 */
797
798static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
799 u8 ident, u16 len, void *data)
800{
801 struct ppp_header *h;
802 struct lcp_header *lh;
803 struct sk_buff *skb;
804 struct net_device *dev = sp->pp_if;
805
806 skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+LCP_HEADER_LEN+len,
807 GFP_ATOMIC);
808 if (skb==NULL)
809 return;
810
811 skb_reserve(skb,dev->hard_header_len);
812
813 h = (struct ppp_header *)skb_put(skb, sizeof(struct ppp_header));
814 h->address = PPP_ALLSTATIONS; /* broadcast address */
815 h->control = PPP_UI; /* Unnumbered Info */
816 h->protocol = htons (proto); /* Link Control Protocol */
817
818 lh = (struct lcp_header *)skb_put(skb, sizeof(struct lcp_header));
819 lh->type = type;
820 lh->ident = ident;
821 lh->len = htons (LCP_HEADER_LEN + len);
822
823 if (len)
824 memcpy(skb_put(skb,len),data, len);
825
826 if (sp->pp_flags & PP_DEBUG) {
827 printk (KERN_WARNING "%s: %s output <%s id=%xh len=%xh",
828 dev->name,
829 proto==PPP_LCP ? "lcp" : "ipcp",
830 proto==PPP_LCP ? sppp_lcp_type_name (lh->type) :
831 sppp_ipcp_type_name (lh->type), lh->ident,
832 ntohs (lh->len));
833 if (len)
834 sppp_print_bytes ((u8*) (lh+1), len);
835 printk (">\n");
836 }
837 sp->obytes += skb->len;
838 /* Control is high priority so it doesn't get queued behind data */
839 skb->priority=TC_PRIO_CONTROL;
840 skb->dev = dev;
841 skb_queue_tail(&tx_queue, skb);
842}
843
844/*
845 * Send Cisco keepalive packet.
846 */
847
848static void sppp_cisco_send (struct sppp *sp, int type, long par1, long par2)
849{
850 struct ppp_header *h;
851 struct cisco_packet *ch;
852 struct sk_buff *skb;
853 struct net_device *dev = sp->pp_if;
854 u32 t = jiffies * 1000/HZ;
855
856 skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+CISCO_PACKET_LEN,
857 GFP_ATOMIC);
858
859 if(skb==NULL)
860 return;
861
862 skb_reserve(skb, dev->hard_header_len);
863 h = (struct ppp_header *)skb_put (skb, sizeof(struct ppp_header));
864 h->address = CISCO_MULTICAST;
865 h->control = 0;
866 h->protocol = htons (CISCO_KEEPALIVE);
867
868 ch = (struct cisco_packet*)skb_put(skb, CISCO_PACKET_LEN);
869 ch->type = htonl (type);
870 ch->par1 = htonl (par1);
871 ch->par2 = htonl (par2);
872 ch->rel = -1;
873 ch->time0 = htons ((u16) (t >> 16));
874 ch->time1 = htons ((u16) t);
875
876 if (sp->pp_flags & PP_DEBUG)
877 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
878 dev->name, ntohl (ch->type), ch->par1,
879 ch->par2, ch->rel, ch->time0, ch->time1);
880 sp->obytes += skb->len;
881 skb->priority=TC_PRIO_CONTROL;
882 skb->dev = dev;
883 skb_queue_tail(&tx_queue, skb);
884}
885
886/**
887 * sppp_close - close down a synchronous PPP or Cisco HDLC link
888 * @dev: The network device to drop the link of
889 *
890 * This drops the logical interface to the channel. It is not
891 * done politely as we assume we will also be dropping DTR. Any
892 * timeouts are killed.
893 */
894
895int sppp_close (struct net_device *dev)
896{
897 struct sppp *sp = (struct sppp *)sppp_of(dev);
898 unsigned long flags;
899
900 spin_lock_irqsave(&sp->lock, flags);
901 sp->pp_link_state = SPPP_LINK_DOWN;
902 sp->lcp.state = LCP_STATE_CLOSED;
903 sp->ipcp.state = IPCP_STATE_CLOSED;
904 sppp_clear_timeout (sp);
905 spin_unlock_irqrestore(&sp->lock, flags);
906
907 return 0;
908}
909
910EXPORT_SYMBOL(sppp_close);
911
912/**
913 * sppp_open - open a synchronous PPP or Cisco HDLC link
914 * @dev: Network device to activate
915 *
916 * Close down any existing synchronous session and commence
917 * from scratch. In the PPP case this means negotiating LCP/IPCP
918 * and friends, while for Cisco HDLC we simply need to start sending
919 *	keepalives.
920 */
921
922int sppp_open (struct net_device *dev)
923{
924 struct sppp *sp = (struct sppp *)sppp_of(dev);
925 unsigned long flags;
926
927 sppp_close(dev);
928
929 spin_lock_irqsave(&sp->lock, flags);
930 if (!(sp->pp_flags & PP_CISCO)) {
931 sppp_lcp_open (sp);
932 }
933 sp->pp_link_state = SPPP_LINK_DOWN;
934 spin_unlock_irqrestore(&sp->lock, flags);
935 sppp_flush_xmit();
936
937 return 0;
938}
939
940EXPORT_SYMBOL(sppp_open);
941
942/**
943 * sppp_reopen - notify of physical link loss
944 * @dev: Device that lost the link
945 *
946 * This function informs the synchronous protocol code that
947 * the underlying link died (for example a carrier drop on X.21)
948 *
949 * We increment the magic numbers to ensure that if the other end
950 *	failed to notice, we will correctly start a new session. Due to
951 *	the nature of telco circuits it can happen that carrier is lost
952 *	on one end only.
953 *
954 * Having done this we go back to negotiating. This function may
955 * be called from an interrupt context.
956 */
957
958int sppp_reopen (struct net_device *dev)
959{
960 struct sppp *sp = (struct sppp *)sppp_of(dev);
961 unsigned long flags;
962
963 sppp_close(dev);
964
965 spin_lock_irqsave(&sp->lock, flags);
966 if (!(sp->pp_flags & PP_CISCO))
967 {
968 sp->lcp.magic = jiffies;
969 ++sp->pp_seq;
970 sp->lcp.state = LCP_STATE_CLOSED;
971 sp->ipcp.state = IPCP_STATE_CLOSED;
972 /* Give it a moment for the line to settle then go */
973 sppp_set_timeout (sp, 1);
974 }
975 sp->pp_link_state=SPPP_LINK_DOWN;
976 spin_unlock_irqrestore(&sp->lock, flags);
977
978 return 0;
979}
980
981EXPORT_SYMBOL(sppp_reopen);
982
983/**
984 * sppp_change_mtu - Change the link MTU
985 * @dev: Device to change MTU on
986 * @new_mtu: New MTU
987 *
988 * Change the MTU on the link. This can only be called with
989 * the link down. It returns an error if the link is up or
990 * the mtu is out of range.
991 */
992
993int sppp_change_mtu(struct net_device *dev, int new_mtu)
994{
995 if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
996 return -EINVAL;
997 dev->mtu=new_mtu;
998 return 0;
999}
1000
1001EXPORT_SYMBOL(sppp_change_mtu);
1002
1003/**
1004 * sppp_do_ioctl - Ioctl handler for ppp/hdlc
1005 * @dev: Device subject to ioctl
1006 * @ifr: Interface request block from the user
1007 * @cmd: Command that is being issued
1008 *
1009 * This function handles the ioctls that may be issued by the user
1010 * to control the settings of a PPP/HDLC link. It does both busy
1011 * and security checks. This function is intended to be wrapped by
1012 * callers who wish to add additional ioctl calls of their own.
1013 */
1014
1015int sppp_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1016{
1017 struct sppp *sp = (struct sppp *)sppp_of(dev);
1018
1019 if(dev->flags&IFF_UP)
1020 return -EBUSY;
1021
1022 if(!capable(CAP_NET_ADMIN))
1023 return -EPERM;
1024
1025 switch(cmd)
1026 {
1027 case SPPPIOCCISCO:
1028 sp->pp_flags|=PP_CISCO;
1029 dev->type = ARPHRD_HDLC;
1030 break;
1031 case SPPPIOCPPP:
1032 sp->pp_flags&=~PP_CISCO;
1033 dev->type = ARPHRD_PPP;
1034 break;
1035 case SPPPIOCDEBUG:
1036 sp->pp_flags&=~PP_DEBUG;
1037 if(ifr->ifr_flags)
1038 sp->pp_flags|=PP_DEBUG;
1039 break;
1040 case SPPPIOCGFLAGS:
1041 if(copy_to_user(ifr->ifr_data, &sp->pp_flags, sizeof(sp->pp_flags)))
1042 return -EFAULT;
1043 break;
1044 case SPPPIOCSFLAGS:
1045 if(copy_from_user(&sp->pp_flags, ifr->ifr_data, sizeof(sp->pp_flags)))
1046 return -EFAULT;
1047 break;
1048 default:
1049 return -EINVAL;
1050 }
1051 return 0;
1052}
1053
1054EXPORT_SYMBOL(sppp_do_ioctl);
1055
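/*
 * Editor's note: a minimal sketch (not part of this file) of how a card
 * driver might wrap sppp_do_ioctl() to add ioctls of its own, as the
 * comment above suggests. The mydrv_ names and the use of SIOCDEVPRIVATE
 * are hypothetical; only sppp_do_ioctl() itself comes from this file.
 */
#if 0
static int mydrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	/* Handle the card-specific command first ... */
	if (cmd == SIOCDEVPRIVATE) {
		/* ... card-specific handling would go here ... */
		return 0;
	}
	/* ... and fall through to the generic PPP/HDLC handling. */
	return sppp_do_ioctl(dev, ifr, cmd);
}
#endif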
1056/**
1057 * sppp_attach - attach synchronous PPP/HDLC to a device
1058 * @pd: PPP device to initialise
1059 *
1060 * This initialises the PPP/HDLC support on an interface. At the
1061 * time of calling the dev element must point to the network device
1062 * that this interface is attached to. The interface should not yet
1063 * be registered.
1064 */
1065
1066void sppp_attach(struct ppp_device *pd)
1067{
1068 struct net_device *dev = pd->dev;
1069 struct sppp *sp = &pd->sppp;
1070 unsigned long flags;
1071
1072 /* Make sure embedding is safe for sppp_of */
1073 BUG_ON(sppp_of(dev) != sp);
1074
1075 spin_lock_irqsave(&spppq_lock, flags);
1076 /* Initialize keepalive handler. */
1077 if (! spppq)
1078 {
1079 init_timer(&sppp_keepalive_timer);
1080 sppp_keepalive_timer.expires=jiffies+10*HZ;
1081 sppp_keepalive_timer.function=sppp_keepalive;
1082 add_timer(&sppp_keepalive_timer);
1083 }
1084 /* Insert new entry into the keepalive list. */
1085 sp->pp_next = spppq;
1086 spppq = sp;
1087 spin_unlock_irqrestore(&spppq_lock, flags);
1088
1089 sp->pp_loopcnt = 0;
1090 sp->pp_alivecnt = 0;
1091 sp->pp_seq = 0;
1092 sp->pp_rseq = 0;
1093 sp->pp_flags = PP_KEEPALIVE|PP_CISCO|debug;/*PP_DEBUG;*/
1094 sp->lcp.magic = 0;
1095 sp->lcp.state = LCP_STATE_CLOSED;
1096 sp->ipcp.state = IPCP_STATE_CLOSED;
1097 sp->pp_if = dev;
1098 spin_lock_init(&sp->lock);
1099
1100 /*
1101 * Device specific setup. All but interrupt handler and
1102 * hard_start_xmit.
1103 */
1104
1105 dev->hard_header = sppp_hard_header;
1106 dev->rebuild_header = sppp_rebuild_header;
1107 dev->tx_queue_len = 10;
1108 dev->type = ARPHRD_HDLC;
1109 dev->addr_len = 0;
1110 dev->hard_header_len = sizeof(struct ppp_header);
1111 dev->mtu = PPP_MTU;
1112 /*
1113	 *	These 4 may be overridden by the driver, but any replacement MUST still call the sppp_ functions
1114 */
1115 dev->do_ioctl = sppp_do_ioctl;
1116#if 0
1117 dev->get_stats = NULL; /* Let the driver override these */
1118 dev->open = sppp_open;
1119 dev->stop = sppp_close;
1120#endif
1121 dev->change_mtu = sppp_change_mtu;
1122 dev->hard_header_cache = NULL;
1123 dev->header_cache_update = NULL;
1124 dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
1125}
1126
1127EXPORT_SYMBOL(sppp_attach);
1128
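/*
 * Editor's note: a minimal sketch (not part of this file) of the attach
 * sequence described above, modelled on the #if 0 block inside
 * sppp_attach(). A driver embeds struct ppp_device, points pd->dev at
 * its net_device, calls sppp_attach() before registering the device,
 * and installs open/stop handlers that also call sppp_open()/sppp_close().
 * The mydrv_ names are hypothetical.
 */
#if 0
static int mydrv_open(struct net_device *dev)
{
	/* Bring the hardware up here, then start LCP/IPCP (or keepalives). */
	return sppp_open(dev);
}

static int mydrv_stop(struct net_device *dev)
{
	sppp_close(dev);
	/* Shut the hardware down here. */
	return 0;
}

static void mydrv_setup(struct ppp_device *pd, struct net_device *dev)
{
	pd->dev = dev;
	sppp_attach(pd);		/* fills in most of *dev */
	dev->open = mydrv_open;		/* overrides MUST still call sppp_open() */
	dev->stop = mydrv_stop;		/* ... and sppp_close() */
}
#endif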
1129/**
1130 * sppp_detach - release PPP resources from a device
1131 * @dev: Network device to release
1132 *
1133 * Stop and free up any PPP/HDLC resources used by this
1134 * interface. This must be called before the device is
1135 * freed.
1136 */
1137
1138void sppp_detach (struct net_device *dev)
1139{
1140 struct sppp **q, *p, *sp = (struct sppp *)sppp_of(dev);
1141 unsigned long flags;
1142
1143 spin_lock_irqsave(&spppq_lock, flags);
1144 /* Remove the entry from the keepalive list. */
1145 for (q = &spppq; (p = *q); q = &p->pp_next)
1146 if (p == sp) {
1147 *q = p->pp_next;
1148 break;
1149 }
1150
1151 /* Stop keepalive handler. */
1152 if (! spppq)
1153 del_timer(&sppp_keepalive_timer);
1154 sppp_clear_timeout (sp);
1155 spin_unlock_irqrestore(&spppq_lock, flags);
1156}
1157
1158EXPORT_SYMBOL(sppp_detach);
1159
1160/*
1161 * Analyze the LCP Configure-Request options list
1162 * for the presence of unknown options.
1163 * If the request contains unknown options, build and send a
1164 * Configure-Reject packet containing only the unknown options.
1165 */
1166static int
1167sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
1168 int len, u32 *magic)
1169{
1170 u8 *buf, *r, *p;
1171 int rlen;
1172
1173 len -= 4;
1174 buf = r = kmalloc (len, GFP_ATOMIC);
1175 if (! buf)
1176 return (0);
1177
1178 p = (void*) (h+1);
1179 for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
1180 switch (*p) {
1181 case LCP_OPT_MAGIC:
1182 /* Magic number -- extract. */
1183 if (len >= 6 && p[1] == 6) {
1184 *magic = (u32)p[2] << 24 |
1185 (u32)p[3] << 16 | p[4] << 8 | p[5];
1186 continue;
1187 }
1188 break;
1189 case LCP_OPT_ASYNC_MAP:
1190 /* Async control character map -- check to be zero. */
1191 if (len >= 6 && p[1] == 6 && ! p[2] && ! p[3] &&
1192 ! p[4] && ! p[5])
1193 continue;
1194 break;
1195 case LCP_OPT_MRU:
1196 /* Maximum receive unit -- always OK. */
1197 continue;
1198 default:
1199 /* Others not supported. */
1200 break;
1201 }
1202 /* Add the option to rejected list. */
1203 memcpy(r, p, p[1]);
1204 r += p[1];
1205 rlen += p[1];
1206 }
1207 if (rlen)
1208 sppp_cp_send (sp, PPP_LCP, LCP_CONF_REJ, h->ident, rlen, buf);
1209 kfree(buf);
1210 return (rlen == 0);
1211}
1212
1213static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *skb)
1214{
1215 struct lcp_header *h;
1216 struct net_device *dev = sp->pp_if;
1217 int len = skb->len;
1218
1219 if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
1220 if (sp->pp_flags & PP_DEBUG)
1221 printk (KERN_WARNING "%s: invalid ipcp packet length: %d bytes\n",
1222 dev->name, len);
1223 return;
1224 }
1225 h = (struct lcp_header *)skb->data;
1226 skb_pull(skb,sizeof(struct lcp_header));
1227 if (sp->pp_flags & PP_DEBUG) {
1228 printk (KERN_WARNING "%s: ipcp input: %d bytes <%s id=%xh len=%xh",
1229 dev->name, len,
1230 sppp_ipcp_type_name (h->type), h->ident, ntohs (h->len));
1231 if (len > 4)
1232 sppp_print_bytes ((u8*) (h+1), len-4);
1233 printk (">\n");
1234 }
1235 if (len > ntohs (h->len))
1236 len = ntohs (h->len);
1237 switch (h->type) {
1238 default:
1239 /* Unknown packet type -- send Code-Reject packet. */
1240 sppp_cp_send (sp, PPP_IPCP, IPCP_CODE_REJ, ++sp->pp_seq, len, h);
1241 break;
1242 case IPCP_CONF_REQ:
1243 if (len < 4) {
1244 if (sp->pp_flags & PP_DEBUG)
1245 printk (KERN_WARNING "%s: invalid ipcp configure request packet length: %d bytes\n",
1246 dev->name, len);
1247 return;
1248 }
1249 if (len > 4) {
1250 sppp_cp_send (sp, PPP_IPCP, LCP_CONF_REJ, h->ident,
1251 len-4, h+1);
1252
1253 switch (sp->ipcp.state) {
1254 case IPCP_STATE_OPENED:
1255 /* Initiate renegotiation. */
1256 sppp_ipcp_open (sp);
1257 /* fall through... */
1258 case IPCP_STATE_ACK_SENT:
1259 /* Go to closed state. */
1260 sp->ipcp.state = IPCP_STATE_CLOSED;
1261 }
1262 } else {
1263 /* Send Configure-Ack packet. */
1264 sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_ACK, h->ident,
1265 0, NULL);
1266 /* Change the state. */
1267 if (sp->ipcp.state == IPCP_STATE_ACK_RCVD)
1268 sp->ipcp.state = IPCP_STATE_OPENED;
1269 else
1270 sp->ipcp.state = IPCP_STATE_ACK_SENT;
1271 }
1272 break;
1273 case IPCP_CONF_ACK:
1274 if (h->ident != sp->ipcp.confid)
1275 break;
1276 sppp_clear_timeout (sp);
1277 switch (sp->ipcp.state) {
1278 case IPCP_STATE_CLOSED:
1279 sp->ipcp.state = IPCP_STATE_ACK_RCVD;
1280 sppp_set_timeout (sp, 5);
1281 break;
1282 case IPCP_STATE_ACK_SENT:
1283 sp->ipcp.state = IPCP_STATE_OPENED;
1284 break;
1285 }
1286 break;
1287 case IPCP_CONF_NAK:
1288 case IPCP_CONF_REJ:
1289 if (h->ident != sp->ipcp.confid)
1290 break;
1291 sppp_clear_timeout (sp);
1292 /* Initiate renegotiation. */
1293 sppp_ipcp_open (sp);
1294 if (sp->ipcp.state != IPCP_STATE_ACK_SENT)
1295 /* Go to closed state. */
1296 sp->ipcp.state = IPCP_STATE_CLOSED;
1297 break;
1298 case IPCP_TERM_REQ:
1299 /* Send Terminate-Ack packet. */
1300 sppp_cp_send (sp, PPP_IPCP, IPCP_TERM_ACK, h->ident, 0, NULL);
1301 /* Go to closed state. */
1302 sp->ipcp.state = IPCP_STATE_CLOSED;
1303 /* Initiate renegotiation. */
1304 sppp_ipcp_open (sp);
1305 break;
1306 case IPCP_TERM_ACK:
1307 /* Ignore for now. */
1308 case IPCP_CODE_REJ:
1309 /* Ignore for now. */
1310 break;
1311 }
1312}
1313
1314static void sppp_lcp_open (struct sppp *sp)
1315{
1316 char opt[6];
1317
1318 if (! sp->lcp.magic)
1319 sp->lcp.magic = jiffies;
1320 opt[0] = LCP_OPT_MAGIC;
1321 opt[1] = sizeof (opt);
1322 opt[2] = sp->lcp.magic >> 24;
1323 opt[3] = sp->lcp.magic >> 16;
1324 opt[4] = sp->lcp.magic >> 8;
1325 opt[5] = sp->lcp.magic;
1326 sp->lcp.confid = ++sp->pp_seq;
1327 sppp_cp_send (sp, PPP_LCP, LCP_CONF_REQ, sp->lcp.confid,
1328 sizeof (opt), &opt);
1329 sppp_set_timeout (sp, 2);
1330}
1331
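/*
 * Editor's note: a sketch (not from this file) of the LCP body that
 * sppp_lcp_open() queues via sppp_cp_send(), assuming the LCP_* codes
 * and LCP_OPT_MAGIC carry their RFC 1661 values and lcp.magic happens
 * to be 0x11223344. The PPP header (ff 03 c0 21) is prepended by
 * sppp_cp_send() itself.
 */
#if 0
static const unsigned char lcp_conf_req_example[] = {
	0x01,			/* code: Configure-Request */
	0x01,			/* ident: sp->lcp.confid */
	0x00, 0x0a,		/* length: 4-byte header + 6-byte option */
	0x05, 0x06,		/* option: Magic-Number, length 6 */
	0x11, 0x22, 0x33, 0x44,	/* the magic value, network byte order */
};
#endif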
1332static void sppp_ipcp_open (struct sppp *sp)
1333{
1334 sp->ipcp.confid = ++sp->pp_seq;
1335 sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REQ, sp->ipcp.confid, 0, NULL);
1336 sppp_set_timeout (sp, 2);
1337}
1338
1339/*
1340 * Process PPP control protocol timeouts.
1341 */
1342
1343static void sppp_cp_timeout (unsigned long arg)
1344{
1345 struct sppp *sp = (struct sppp*) arg;
1346 unsigned long flags;
1347
1348 spin_lock_irqsave(&sp->lock, flags);
1349
1350 sp->pp_flags &= ~PP_TIMO;
1351 if (! (sp->pp_if->flags & IFF_UP) || (sp->pp_flags & PP_CISCO)) {
1352 spin_unlock_irqrestore(&sp->lock, flags);
1353 return;
1354 }
1355 switch (sp->lcp.state) {
1356 case LCP_STATE_CLOSED:
1357 /* No ACK for Configure-Request, retry. */
1358 sppp_lcp_open (sp);
1359 break;
1360 case LCP_STATE_ACK_RCVD:
1361 /* ACK got, but no Configure-Request for peer, retry. */
1362 sppp_lcp_open (sp);
1363 sp->lcp.state = LCP_STATE_CLOSED;
1364 break;
1365 case LCP_STATE_ACK_SENT:
1366 /* ACK sent but no ACK for Configure-Request, retry. */
1367 sppp_lcp_open (sp);
1368 break;
1369 case LCP_STATE_OPENED:
1370 /* LCP is already OK, try IPCP. */
1371 switch (sp->ipcp.state) {
1372 case IPCP_STATE_CLOSED:
1373 /* No ACK for Configure-Request, retry. */
1374 sppp_ipcp_open (sp);
1375 break;
1376 case IPCP_STATE_ACK_RCVD:
1377 /* ACK got, but no Configure-Request for peer, retry. */
1378 sppp_ipcp_open (sp);
1379 sp->ipcp.state = IPCP_STATE_CLOSED;
1380 break;
1381 case IPCP_STATE_ACK_SENT:
1382 /* ACK sent but no ACK for Configure-Request, retry. */
1383 sppp_ipcp_open (sp);
1384 break;
1385 case IPCP_STATE_OPENED:
1386 /* IPCP is OK. */
1387 break;
1388 }
1389 break;
1390 }
1391 spin_unlock_irqrestore(&sp->lock, flags);
1392 sppp_flush_xmit();
1393}
1394
1395static char *sppp_lcp_type_name (u8 type)
1396{
1397 static char buf [8];
1398 switch (type) {
1399 case LCP_CONF_REQ: return ("conf-req");
1400 case LCP_CONF_ACK: return ("conf-ack");
1401 case LCP_CONF_NAK: return ("conf-nack");
1402 case LCP_CONF_REJ: return ("conf-rej");
1403 case LCP_TERM_REQ: return ("term-req");
1404 case LCP_TERM_ACK: return ("term-ack");
1405 case LCP_CODE_REJ: return ("code-rej");
1406 case LCP_PROTO_REJ: return ("proto-rej");
1407 case LCP_ECHO_REQ: return ("echo-req");
1408 case LCP_ECHO_REPLY: return ("echo-reply");
1409 case LCP_DISC_REQ: return ("discard-req");
1410 }
1411 sprintf (buf, "%xh", type);
1412 return (buf);
1413}
1414
1415static char *sppp_ipcp_type_name (u8 type)
1416{
1417 static char buf [8];
1418 switch (type) {
1419 case IPCP_CONF_REQ: return ("conf-req");
1420 case IPCP_CONF_ACK: return ("conf-ack");
1421 case IPCP_CONF_NAK: return ("conf-nack");
1422 case IPCP_CONF_REJ: return ("conf-rej");
1423 case IPCP_TERM_REQ: return ("term-req");
1424 case IPCP_TERM_ACK: return ("term-ack");
1425 case IPCP_CODE_REJ: return ("code-rej");
1426 }
1427 sprintf (buf, "%xh", type);
1428 return (buf);
1429}
1430
1431static void sppp_print_bytes (u_char *p, u16 len)
1432{
1433 printk (" %x", *p++);
1434 while (--len > 0)
1435 printk ("-%x", *p++);
1436}
1437
1438/**
1439 * sppp_rcv - receive and process a WAN PPP frame
1440 * @skb: The buffer to process
1441 * @dev: The device it arrived on
1442 * @p: Unused
1443 *
1444 * Protocol glue. This drives the deferred processing mode the poorer
1445 * cards use. This can be called directly by cards that do not have
1446 * timing constraints but is normally called from the network layer
1447 * after interrupt servicing to process frames queued via netif_rx.
1448 */
1449
1450static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p)
1451{
1452 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1453 return NET_RX_DROP;
1454 sppp_input(dev,skb);
1455 return 0;
1456}
1457
1458struct packet_type sppp_packet_type = {
1459 .type = __constant_htons(ETH_P_WAN_PPP),
1460 .func = sppp_rcv,
1461};
1462
1463static char banner[] __initdata =
1464 KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n"
1465 KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & "
1466 "Jan \"Yenya\" Kasprzak.\n";
1467
1468static int __init sync_ppp_init(void)
1469{
1470 if(debug)
1471 debug=PP_DEBUG;
1472 printk(banner);
1473 skb_queue_head_init(&tx_queue);
1474 dev_add_pack(&sppp_packet_type);
1475 return 0;
1476}
1477
1478
1479static void __exit sync_ppp_cleanup(void)
1480{
1481 dev_remove_pack(&sppp_packet_type);
1482}
1483
1484module_init(sync_ppp_init);
1485module_exit(sync_ppp_cleanup);
1486module_param(debug, int, 0);
1487MODULE_LICENSE("GPL");
1488
diff --git a/drivers/net/wan/wanpipe_multppp.c b/drivers/net/wan/wanpipe_multppp.c
new file mode 100644
index 000000000000..6aa6987d96cb
--- /dev/null
+++ b/drivers/net/wan/wanpipe_multppp.c
@@ -0,0 +1,2357 @@
1/*****************************************************************************
2* wanpipe_multppp.c Multi-Port PPP driver module.
3*
4* Authors: Nenad Corbic <ncorbic@sangoma.com>
5*
6* Copyright: (c) 1995-2001 Sangoma Technologies Inc.
7*
8* This program is free software; you can redistribute it and/or
9* modify it under the terms of the GNU General Public License
10* as published by the Free Software Foundation; either version
11* 2 of the License, or (at your option) any later version.
12* ============================================================================
13* Dec 15 2000 Updated for 2.4.X kernel
14* Nov 15 2000 Fixed the SyncPPP support for kernels 2.2.16 and higher.
15* The pppstruct has changed.
16* Jul 13 2000 Using the kernel Syncppp module on top of RAW Wanpipe CHDLC
17* module.
18*****************************************************************************/
19
20#include <linux/module.h>
21#include <linux/kernel.h> /* printk(), and other useful stuff */
22#include <linux/stddef.h> /* offsetof(), etc. */
23#include <linux/errno.h> /* return codes */
24#include <linux/string.h> /* inline memset(), etc. */
25#include <linux/slab.h> /* kmalloc(), kfree() */
26#include <linux/wanrouter.h> /* WAN router definitions */
27#include <linux/wanpipe.h> /* WANPIPE common user API definitions */
28#include <linux/if_arp.h> /* ARPHRD_* defines */
29
30#include <linux/in.h> /* sockaddr_in */
31#include <linux/inet.h>
32#include <linux/if.h>
33#include <asm/byteorder.h> /* htons(), etc. */
34#include <linux/sdlapci.h>
35#include <asm/io.h>
36
37#include <linux/sdla_chdlc.h> /* CHDLC firmware API definitions */
38#include <linux/sdla_asy.h> /* CHDLC (async) API definitions */
39
40#include <linux/if_wanpipe_common.h> /* Socket Driver common area */
41#include <linux/if_wanpipe.h>
42
43
44#include <linux/inetdevice.h>
45#include <asm/uaccess.h>
46
47#include <net/syncppp.h>
48
49
50/****** Defines & Macros ****************************************************/
51
52#ifdef _DEBUG_
53#define STATIC
54#else
55#define STATIC static
56#endif
57
58/* reasons for enabling the timer interrupt on the adapter */
59#define TMR_INT_ENABLED_UDP 0x01
60#define TMR_INT_ENABLED_UPDATE 0x02
61#define TMR_INT_ENABLED_CONFIG 0x04
62
63#define CHDLC_DFLT_DATA_LEN 1500 /* default MTU */
64#define CHDLC_HDR_LEN 1
65
66#define IFF_POINTTOPOINT 0x10
67
68#define CHDLC_API 0x01
69
70#define PORT(x) (x == 0 ? "PRIMARY" : "SECONDARY" )
71#define MAX_BH_BUFF 10
72
73#define CRC_LENGTH 2
74#define PPP_HEADER_LEN 4
75
76/******Data Structures*****************************************************/
77
78/* This structure is placed in the private data area of the device structure.
79 * The card structure used to occupy the private area but now the following
80 * structure will incorporate the card structure along with CHDLC specific data
81 */
82
83typedef struct chdlc_private_area
84{
85 void *if_ptr; /* General Pointer used by SPPP */
86 wanpipe_common_t common;
87 sdla_t *card;
88 int TracingEnabled; /* For enabling Tracing */
89 unsigned long curr_trace_addr; /* Used for Tracing */
90 unsigned long start_trace_addr;
91 unsigned long end_trace_addr;
92 unsigned long base_addr_trace_buffer;
93 unsigned long end_addr_trace_buffer;
94 unsigned short number_trace_elements;
95 unsigned available_buffer_space;
96 unsigned long router_start_time;
97 unsigned char route_status;
98 unsigned char route_removed;
99 unsigned long tick_counter; /* For 5s timeout counter */
100 unsigned long router_up_time;
101 u32 IP_address; /* IP addressing */
102 u32 IP_netmask;
103	unsigned char mc;			/* Multicast support on/off */
104 unsigned short udp_pkt_lgth; /* udp packet processing */
105 char udp_pkt_src;
106 char udp_pkt_data[MAX_LGTH_UDP_MGNT_PKT];
107 unsigned short timer_int_enabled;
108 char update_comms_stats; /* updating comms stats */
109
110 //FIXME: add driver stats as per frame relay!
111
112} chdlc_private_area_t;
113
114/* Route Status options */
115#define NO_ROUTE 0x00
116#define ADD_ROUTE 0x01
117#define ROUTE_ADDED 0x02
118#define REMOVE_ROUTE 0x03
119
120
121/* variable for keeping track of enabling/disabling FT1 monitor status */
122static int rCount = 0;
123
124/* variable for tracking how many interfaces to open for WANPIPE on the
125 two ports */
126
127extern void disable_irq(unsigned int);
128extern void enable_irq(unsigned int);
129
130/****** Function Prototypes *************************************************/
131/* WAN link driver entry points. These are called by the WAN router module. */
132static int update(struct wan_device* wandev);
133static int new_if(struct wan_device* wandev, struct net_device* dev,
134 wanif_conf_t* conf);
135static int del_if(struct wan_device* wandev, struct net_device* dev);
136
137/* Network device interface */
138static int if_init(struct net_device* dev);
139static int if_open(struct net_device* dev);
140static int if_close(struct net_device* dev);
141static int if_send(struct sk_buff* skb, struct net_device* dev);
142static struct net_device_stats* if_stats(struct net_device* dev);
143
144static void if_tx_timeout(struct net_device *dev);
145
146/* CHDLC Firmware interface functions */
147static int chdlc_configure (sdla_t* card, void* data);
148static int chdlc_comm_enable (sdla_t* card);
149static int chdlc_comm_disable (sdla_t* card);
150static int chdlc_read_version (sdla_t* card, char* str);
151static int chdlc_set_intr_mode (sdla_t* card, unsigned mode);
152static int chdlc_send (sdla_t* card, void* data, unsigned len);
153static int chdlc_read_comm_err_stats (sdla_t* card);
154static int chdlc_read_op_stats (sdla_t* card);
155static int config_chdlc (sdla_t *card);
156
157
158/* Miscellaneous CHDLC Functions */
159static int set_chdlc_config (sdla_t* card);
160static void init_chdlc_tx_rx_buff(sdla_t* card, struct net_device *dev);
161static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb);
162static int process_chdlc_exception(sdla_t *card);
163static int process_global_exception(sdla_t *card);
164static int update_comms_stats(sdla_t* card,
165 chdlc_private_area_t* chdlc_priv_area);
166static void port_set_state (sdla_t *card, int);
167
168/* Interrupt handlers */
169static void wsppp_isr (sdla_t* card);
170static void rx_intr (sdla_t* card);
171static void timer_intr(sdla_t *);
172
173/* Miscellaneous functions */
174static int reply_udp( unsigned char *data, unsigned int mbox_len );
175static int intr_test( sdla_t* card);
176static int udp_pkt_type( struct sk_buff *skb , sdla_t* card);
177static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
178 struct sk_buff *skb, struct net_device* dev,
179 chdlc_private_area_t* chdlc_priv_area);
180static int process_udp_mgmt_pkt(sdla_t* card, struct net_device* dev,
181 chdlc_private_area_t* chdlc_priv_area);
182static unsigned short calc_checksum (char *, int);
183static void s508_lock (sdla_t *card, unsigned long *smp_flags);
184static void s508_unlock (sdla_t *card, unsigned long *smp_flags);
185static void send_ppp_term_request(struct net_device *dev);
186
187
188static int Intr_test_counter;
189/****** Public Functions ****************************************************/
190
191/*============================================================================
192 * Cisco HDLC protocol initialization routine.
193 *
194 * This routine is called by the main WANPIPE module during setup. At this
195 * point adapter is completely initialized and firmware is running.
196 * o read firmware version (to make sure it's alive)
197 * o configure adapter
198 * o initialize protocol-specific fields of the adapter data space.
199 *
200 * Return: 0 o.k.
201 * < 0 failure.
202 */
203int wsppp_init (sdla_t* card, wandev_conf_t* conf)
204{
205 unsigned char port_num;
206 int err;
207 unsigned long max_permitted_baud = 0;
208 SHARED_MEMORY_INFO_STRUCT *flags;
209
210 union
211 {
212 char str[80];
213 } u;
214 volatile CHDLC_MAILBOX_STRUCT* mb;
215 CHDLC_MAILBOX_STRUCT* mb1;
216 unsigned long timeout;
217
218 /* Verify configuration ID */
219 if (conf->config_id != WANCONFIG_MPPP) {
220 printk(KERN_INFO "%s: invalid configuration ID %u!\n",
221 card->devname, conf->config_id);
222 return -EINVAL;
223 }
224
225 /* Find out which Port to use */
226 if ((conf->comm_port == WANOPT_PRI) || (conf->comm_port == WANOPT_SEC)){
227 if (card->next){
228
229 if (conf->comm_port != card->next->u.c.comm_port){
230 card->u.c.comm_port = conf->comm_port;
231 }else{
232 printk(KERN_ERR "%s: ERROR - %s port used!\n",
233 card->wandev.name, PORT(conf->comm_port));
234 return -EINVAL;
235 }
236 }else{
237 card->u.c.comm_port = conf->comm_port;
238 }
239 }else{
240 printk(KERN_ERR "%s: ERROR - Invalid Port Selected!\n",
241 card->wandev.name);
242 return -EINVAL;
243 }
244
245
246 /* Initialize protocol-specific fields */
247 if(card->hw.type != SDLA_S514){
248
249 if (card->u.c.comm_port == WANOPT_PRI){
250 card->mbox = (void *) card->hw.dpmbase;
251 }else{
252 card->mbox = (void *) card->hw.dpmbase +
253 SEC_BASE_ADDR_MB_STRUCT - PRI_BASE_ADDR_MB_STRUCT;
254 }
255 }else{
256 /* for a S514 adapter, set a pointer to the actual mailbox in the */
257 /* allocated virtual memory area */
258 if (card->u.c.comm_port == WANOPT_PRI){
259 card->mbox = (void *) card->hw.dpmbase + PRI_BASE_ADDR_MB_STRUCT;
260 }else{
261 card->mbox = (void *) card->hw.dpmbase + SEC_BASE_ADDR_MB_STRUCT;
262 }
263 }
264
265 mb = mb1 = card->mbox;
266
267 if (!card->configured){
268
269 /* The board will place an 'I' in the return code to indicate that it is
270 ready to accept commands. We expect this to be completed in less
271 than 1 second. */
272
273 timeout = jiffies;
274 while (mb->return_code != 'I') /* Wait 1s for board to initialize */
275 if ((jiffies - timeout) > 1*HZ) break;
276
277 if (mb->return_code != 'I') {
278 printk(KERN_INFO
279 "%s: Initialization not completed by adapter\n",
280 card->devname);
281 printk(KERN_INFO "Please contact Sangoma representative.\n");
282 return -EIO;
283 }
284 }
285
286 /* Read firmware version. Note that when adapter initializes, it
287 * clears the mailbox, so it may appear that the first command was
288 * executed successfully when in fact it was merely erased. To work
289 * around this, we execute the first command twice.
290 */
291
292 if (chdlc_read_version(card, u.str))
293 return -EIO;
294
295 printk(KERN_INFO "%s: Running Raw CHDLC firmware v%s\n"
296 "%s: for Multi-Port PPP protocol.\n",
297 card->devname,u.str,card->devname);
298
299 card->isr = &wsppp_isr;
300 card->poll = NULL;
301 card->exec = NULL;
302 card->wandev.update = &update;
303 card->wandev.new_if = &new_if;
304 card->wandev.del_if = &del_if;
305 card->wandev.udp_port = conf->udp_port;
306
307 card->wandev.new_if_cnt = 0;
308
309 /* reset the number of times the 'update()' proc has been called */
310 card->u.c.update_call_count = 0;
311
312 card->wandev.ttl = conf->ttl;
313 card->wandev.interface = conf->interface;
314
315 if ((card->u.c.comm_port == WANOPT_SEC && conf->interface == WANOPT_V35)&&
316 card->hw.type != SDLA_S514){
317 printk(KERN_INFO "%s: ERROR - V35 Interface not supported on S508 %s port \n",
318 card->devname, PORT(card->u.c.comm_port));
319 return -EIO;
320 }
321
322
323 card->wandev.clocking = conf->clocking;
324
325 port_num = card->u.c.comm_port;
326
327 /* Setup Port Bps */
328
329 if(card->wandev.clocking) {
330 if((port_num == WANOPT_PRI) || card->u.c.receive_only) {
331 /* For Primary Port 0 */
332 max_permitted_baud =
333 (card->hw.type == SDLA_S514) ?
334 PRI_MAX_BAUD_RATE_S514 :
335 PRI_MAX_BAUD_RATE_S508;
336 }
337 else if(port_num == WANOPT_SEC) {
338 /* For Secondary Port 1 */
339 max_permitted_baud =
340 (card->hw.type == SDLA_S514) ?
341 SEC_MAX_BAUD_RATE_S514 :
342 SEC_MAX_BAUD_RATE_S508;
343 }
344
345 if(conf->bps > max_permitted_baud) {
346 conf->bps = max_permitted_baud;
347 printk(KERN_INFO "%s: Baud too high!\n",
348 card->wandev.name);
349 printk(KERN_INFO "%s: Baud rate set to %lu bps\n",
350 card->wandev.name, max_permitted_baud);
351 }
352
353 card->wandev.bps = conf->bps;
354 }else{
355 card->wandev.bps = 0;
356 }
357
358 /* Setup the Port MTU */
359 if((port_num == WANOPT_PRI) || card->u.c.receive_only) {
360
361 /* For Primary Port 0 */
362 card->wandev.mtu =
363 (conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
364 min_t(unsigned int, conf->mtu, PRI_MAX_NO_DATA_BYTES_IN_FRAME) :
365 CHDLC_DFLT_DATA_LEN;
366 } else if(port_num == WANOPT_SEC) {
367 /* For Secondary Port 1 */
368 card->wandev.mtu =
369 (conf->mtu >= MIN_LGTH_CHDLC_DATA_CFG) ?
370 min_t(unsigned int, conf->mtu, SEC_MAX_NO_DATA_BYTES_IN_FRAME) :
371 CHDLC_DFLT_DATA_LEN;
372 }
373
374 /* Add on a PPP Header */
375 card->wandev.mtu += PPP_HEADER_LEN;
376
377 /* Set up the interrupt status area */
378 /* Read the CHDLC Configuration and obtain:
379	 *		Ptr to shared memory info struct
380 * Use this pointer to calculate the value of card->u.c.flags !
381 */
382 mb1->buffer_length = 0;
383 mb1->command = READ_CHDLC_CONFIGURATION;
384 err = sdla_exec(mb1) ? mb1->return_code : CMD_TIMEOUT;
385 if(err != COMMAND_OK) {
386 clear_bit(1, (void*)&card->wandev.critical);
387
388 if(card->hw.type != SDLA_S514)
389 enable_irq(card->hw.irq);
390
391 chdlc_error(card, err, mb1);
392 return -EIO;
393 }
394
395 if(card->hw.type == SDLA_S514){
396 card->u.c.flags = (void *)(card->hw.dpmbase +
397 (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
398 ptr_shared_mem_info_struct));
399 }else{
400 card->u.c.flags = (void *)(card->hw.dpmbase +
401 (((CHDLC_CONFIGURATION_STRUCT *)mb1->data)->
402 ptr_shared_mem_info_struct % SDLA_WINDOWSIZE));
403 }
404
405 flags = card->u.c.flags;
406
407 /* This is for the ports link state */
408 card->wandev.state = WAN_DUALPORT;
409 card->u.c.state = WAN_DISCONNECTED;
410
411
412 if (!card->wandev.piggyback){
413 err = intr_test(card);
414
415 if(err || (Intr_test_counter < MAX_INTR_TEST_COUNTER)) {
416 printk(KERN_ERR "%s: Interrupt test failed (%i)\n",
417 card->devname, Intr_test_counter);
418 printk(KERN_ERR "%s: Please choose another interrupt\n",
419 card->devname);
420 return -EIO;
421 }
422
423 printk(KERN_INFO "%s: Interrupt test passed (%i)\n",
424 card->devname, Intr_test_counter);
425 }
426
427
428 if (chdlc_set_intr_mode(card, APP_INT_ON_TIMER)){
429 printk (KERN_INFO "%s: Failed to set interrupt triggers!\n",
430 card->devname);
431 return -EIO;
432 }
433
434 /* Mask the Timer interrupt */
435 flags->interrupt_info_struct.interrupt_permission &=
436 ~APP_INT_ON_TIMER;
437
438 printk(KERN_INFO "\n");
439
440 return 0;
441}
442
443/******* WAN Device Driver Entry Points *************************************/
444
445/*============================================================================
446 * Update device status & statistics
447 * This procedure is called when updating the PROC file system and returns
448 * various communications statistics. These statistics are accumulated from 3
449 * different locations:
450 * 1) The 'if_stats' recorded for the device.
451 * 2) Communication error statistics on the adapter.
452 * 3) CHDLC operational statistics on the adapter.
453 * The board level statistics are read during a timer interrupt. Note that we
454 * read the error and operational statistics during consecutive timer ticks so
455 * as to minimize the time that we are inside the interrupt handler.
456 *
457 */
458static int update(struct wan_device* wandev)
459{
460 sdla_t* card = wandev->private;
461 struct net_device* dev;
462 volatile chdlc_private_area_t* chdlc_priv_area;
463 SHARED_MEMORY_INFO_STRUCT *flags;
464 unsigned long timeout;
465
466 /* sanity checks */
467 if((wandev == NULL) || (wandev->private == NULL))
468 return -EFAULT;
469
470 if(wandev->state == WAN_UNCONFIGURED)
471 return -ENODEV;
472
473 /* more sanity checks */
474 if(!card->u.c.flags)
475 return -ENODEV;
476
477 if((dev=card->wandev.dev) == NULL)
478 return -ENODEV;
479
480 if((chdlc_priv_area=dev->priv) == NULL)
481 return -ENODEV;
482
483 flags = card->u.c.flags;
484
485 if(chdlc_priv_area->update_comms_stats){
486 return -EAGAIN;
487 }
488
489 /* we will need 2 timer interrupts to complete the */
490 /* reading of the statistics */
491 chdlc_priv_area->update_comms_stats = 2;
492 flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
493 chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UPDATE;
494
495 /* wait a maximum of 1 second for the statistics to be updated */
496 timeout = jiffies;
497 for(;;) {
498 if(chdlc_priv_area->update_comms_stats == 0)
499 break;
500 if ((jiffies - timeout) > (1 * HZ)){
501 chdlc_priv_area->update_comms_stats = 0;
502 chdlc_priv_area->timer_int_enabled &=
503 ~TMR_INT_ENABLED_UPDATE;
504 return -EAGAIN;
505 }
506 }
507
508 return 0;
509}
510
511
512/*============================================================================
513 * Create new logical channel.
514 * This routine is called by the router when ROUTER_IFNEW IOCTL is being
515 * handled.
516 * o parse media- and hardware-specific configuration
517 * o make sure that a new channel can be created
518 * o allocate resources, if necessary
519 * o prepare network device structure for registration.
520 *
521 * Return: 0 o.k.
522 * < 0 failure (channel will not be created)
523 */
524static int new_if(struct wan_device* wandev, struct net_device* pdev,
525 wanif_conf_t* conf)
526{
527
528 struct ppp_device *pppdev = (struct ppp_device *)pdev;
529 struct net_device *dev = NULL;
530 struct sppp *sp;
531 sdla_t* card = wandev->private;
532 chdlc_private_area_t* chdlc_priv_area;
533
534 if ((conf->name[0] == '\0') || (strlen(conf->name) > WAN_IFNAME_SZ)) {
535 printk(KERN_INFO "%s: invalid interface name!\n",
536 card->devname);
537 return -EINVAL;
538 }
539
540 /* allocate and initialize private data */
541 chdlc_priv_area = kmalloc(sizeof(chdlc_private_area_t), GFP_KERNEL);
542
543 if(chdlc_priv_area == NULL)
544 return -ENOMEM;
545
546 memset(chdlc_priv_area, 0, sizeof(chdlc_private_area_t));
547
548 chdlc_priv_area->card = card;
549
550 /* initialize data */
551 strcpy(card->u.c.if_name, conf->name);
552
553 if(card->wandev.new_if_cnt > 0) {
554 kfree(chdlc_priv_area);
555 return -EEXIST;
556 }
557
558 card->wandev.new_if_cnt++;
559
560 chdlc_priv_area->TracingEnabled = 0;
561
562 //We don't need this any more
563 chdlc_priv_area->route_status = NO_ROUTE;
564 chdlc_priv_area->route_removed = 0;
565
566 printk(KERN_INFO "%s: Firmware running in HDLC STREAMING Mode\n",
567 wandev->name);
568
569 /* Setup wanpipe as a router (WANPIPE) or as an API */
570 if( strcmp(conf->usedby, "WANPIPE") == 0) {
571 printk(KERN_INFO "%s: Driver running in WANPIPE mode!\n",
572 wandev->name);
573 card->u.c.usedby = WANPIPE;
574 } else {
575 printk(KERN_INFO
576 "%s: API Mode is not supported for SyncPPP!\n",
577 wandev->name);
578 kfree(chdlc_priv_area);
579 return -EINVAL;
580 }
581
582 /* Get Multicast Information */
583 chdlc_priv_area->mc = conf->mc;
584
585
586 chdlc_priv_area->if_ptr = pppdev;
587
588	/* Prepare network device data space for registration.
589	 * Note: dev is still NULL at this point, so the interface
590	 * name is copied only after sppp_attach() sets up pppdev->dev. */
591
592	/* Attach PPP protocol layer to pppdev.
593	 * sppp_attach() will initialize the dev structure
594	 * and set up the PPP layer protocols.
595	 * All we have to do is to bind in:
596	 *        if_open(), if_close(), if_send() and get_stats() functions.
597	 */
598	sppp_attach(pppdev);
599	dev = pppdev->dev;
	strcpy(dev->name, card->u.c.if_name);	/* dev is valid only now */
600	sp = &pppdev->sppp;
601
602 /* Enable PPP Debugging */
603 // FIXME Fix this up somehow
604 //sp->pp_flags |= PP_DEBUG;
605 sp->pp_flags &= ~PP_CISCO;
606
607 dev->init = &if_init;
608 dev->priv = chdlc_priv_area;
609
610 return 0;
611}
612
613
614
615
616/*============================================================================
617 * Delete logical channel.
618 */
619static int del_if(struct wan_device* wandev, struct net_device* dev)
620{
621 chdlc_private_area_t *chdlc_priv_area = dev->priv;
622 sdla_t *card = chdlc_priv_area->card;
623 unsigned long smp_lock;
624
625 /* Detach the PPP layer */
626 printk(KERN_INFO "%s: Detaching SyncPPP Module from %s\n",
627 wandev->name,dev->name);
628
629 lock_adapter_irq(&wandev->lock,&smp_lock);
630
631 sppp_detach(dev);
632 chdlc_priv_area->if_ptr=NULL;
633
634 chdlc_set_intr_mode(card, 0);
635 if (card->u.c.comm_enabled)
636 chdlc_comm_disable(card);
637 unlock_adapter_irq(&wandev->lock,&smp_lock);
638
639 port_set_state(card, WAN_DISCONNECTED);
640
641 return 0;
642}
643
644
645/****** Network Device Interface ********************************************/
646
647/*============================================================================
648 * Initialize Linux network interface.
649 *
650 * This routine is called only once for each interface, during Linux network
651 * interface registration. Returning anything but zero will fail interface
652 * registration.
653 */
654static int if_init(struct net_device* dev)
655{
656 chdlc_private_area_t* chdlc_priv_area = dev->priv;
657 sdla_t* card = chdlc_priv_area->card;
658 struct wan_device* wandev = &card->wandev;
659
660 /* NOTE: Most of the dev initialization was
661	 *       done in sppp_attach(), called from the
662	 *       new_if() function. All we have to do here
663	 *       is link in the four major routines below.
664 */
665
666 /* Initialize device driver entry points */
667 dev->open = &if_open;
668 dev->stop = &if_close;
669 dev->hard_start_xmit = &if_send;
670 dev->get_stats = &if_stats;
671 dev->tx_timeout = &if_tx_timeout;
672 dev->watchdog_timeo = TX_TIMEOUT;
673
674
675 /* Initialize hardware parameters */
676 dev->irq = wandev->irq;
677 dev->dma = wandev->dma;
678 dev->base_addr = wandev->ioport;
679 dev->mem_start = wandev->maddr;
680 dev->mem_end = wandev->maddr + wandev->msize - 1;
681
682 /* Set transmit buffer queue length
683 * If we over fill this queue the packets will
684	 * be dropped by the kernel.
685 * sppp_attach() sets this to 10, but
686 * 100 will give us more room at low speeds.
687 */
688 dev->tx_queue_len = 100;
689
690 return 0;
691}
692
693
694/*============================================================================
695 * Handle transmit timeout event from netif watchdog
696 */
697static void if_tx_timeout(struct net_device *dev)
698{
699 chdlc_private_area_t* chan = dev->priv;
700 sdla_t *card = chan->card;
701
702 /* If our device stays busy for at least 5 seconds then we will
703	 * kick-start the device by waking its transmit queue. We expect
704 * that our device never stays busy more than 5 seconds. So this
705 * is only used as a last resort.
706 */
707
708 ++card->wandev.stats.collisions;
709
710 printk (KERN_INFO "%s: Transmit timed out on %s\n", card->devname,dev->name);
711 netif_wake_queue (dev);
712}
713
714
715/*============================================================================
716 * Open network interface.
717 * o enable communications and interrupts.
718 * o prevent module from unloading by incrementing use count
719 *
720 * Return 0 if O.k. or errno.
721 */
722static int if_open(struct net_device* dev)
723{
724 chdlc_private_area_t* chdlc_priv_area = dev->priv;
725 sdla_t* card = chdlc_priv_area->card;
726 struct timeval tv;
727 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
728
729 /* Only one open per interface is allowed */
730 if (netif_running(dev))
731 return -EBUSY;
732
733 /* Start PPP Layer */
734 if (sppp_open(dev)){
735 return -EIO;
736 }
737
738 do_gettimeofday(&tv);
739 chdlc_priv_area->router_start_time = tv.tv_sec;
740
741 netif_start_queue(dev);
742
743 wanpipe_open(card);
744
745 chdlc_priv_area->timer_int_enabled |= TMR_INT_ENABLED_CONFIG;
746 flags->interrupt_info_struct.interrupt_permission |= APP_INT_ON_TIMER;
747 return 0;
748}
749
750/*============================================================================
751 * Close network interface.
752 * o if this is the last close, then disable communications and interrupts.
753 * o reset flags.
754 */
755static int if_close(struct net_device* dev)
756{
757 chdlc_private_area_t* chdlc_priv_area = dev->priv;
758 sdla_t* card = chdlc_priv_area->card;
759
760 /* Stop the PPP Layer */
761 sppp_close(dev);
762 netif_stop_queue(dev);
763
764 wanpipe_close(card);
765
766 return 0;
767}
768
769/*============================================================================
770 * Send a packet on a network interface.
771 * o set tbusy flag (marks start of the transmission) to block a timer-based
772 * transmit from overlapping.
773 * o check link state. If link is not up, then drop the packet.
774 * o execute adapter send command.
775 * o free socket buffer
776 *
777 * Return: 0 complete (socket buffer must be freed)
778 * non-0 packet may be re-transmitted (tbusy must be set)
779 *
780 * Notes:
781 * 1. This routine is called either by the protocol stack or by the "net
782 * bottom half" (with interrupts enabled).
783 * 2. Setting tbusy flag will inhibit further transmit requests from the
784 * protocol stack and can be used for flow control with protocol layer.
785 */
786static int if_send(struct sk_buff* skb, struct net_device* dev)
787{
788 chdlc_private_area_t *chdlc_priv_area = dev->priv;
789 sdla_t *card = chdlc_priv_area->card;
790 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
791 INTERRUPT_INFORMATION_STRUCT *chdlc_int = &flags->interrupt_info_struct;
792 int udp_type = 0;
793 unsigned long smp_flags;
794 int err=0;
795
796 netif_stop_queue(dev);
797
798
799 if (skb == NULL){
800		/* If we get here, some higher layer thinks we've missed a
801 * tx-done interrupt.
802 */
803 printk(KERN_INFO "%s: Received NULL skb buffer! interface %s got kicked!\n",
804 card->devname, dev->name);
805
806 netif_wake_queue(dev);
807 return 0;
808 }
809
810 if (ntohs(skb->protocol) != htons(PVC_PROT)){
811 /* check the udp packet type */
812
813 udp_type = udp_pkt_type(skb, card);
814 if (udp_type == UDP_CPIPE_TYPE){
815 if(store_udp_mgmt_pkt(UDP_PKT_FRM_STACK, card, skb, dev,
816 chdlc_priv_area)){
817 chdlc_int->interrupt_permission |=
818 APP_INT_ON_TIMER;
819 }
820 netif_start_queue(dev);
821 return 0;
822 }
823 }
824
825 /* Lock the 508 Card: SMP is supported */
826 if(card->hw.type != SDLA_S514){
827 s508_lock(card,&smp_flags);
828 }
829
830 if (test_and_set_bit(SEND_CRIT, (void*)&card->wandev.critical)){
831
832 printk(KERN_INFO "%s: Critical in if_send: %lx\n",
833 card->wandev.name,card->wandev.critical);
834 ++card->wandev.stats.tx_dropped;
835 netif_start_queue(dev);
836 goto if_send_crit_exit;
837 }
838
839 if (card->wandev.state != WAN_CONNECTED){
840 ++card->wandev.stats.tx_dropped;
841 netif_start_queue(dev);
842 goto if_send_crit_exit;
843 }
844
845 if (chdlc_send(card, skb->data, skb->len)){
846 netif_stop_queue(dev);
847
848 }else{
849 ++card->wandev.stats.tx_packets;
850 card->wandev.stats.tx_bytes += skb->len;
851 dev->trans_start = jiffies;
852 netif_start_queue(dev);
853 }
854
855if_send_crit_exit:
856 if (!(err=netif_queue_stopped(dev))){
857 dev_kfree_skb_any(skb);
858 }else{
859 chdlc_priv_area->tick_counter = jiffies;
860 chdlc_int->interrupt_permission |= APP_INT_ON_TX_FRAME;
861 }
862
863 clear_bit(SEND_CRIT, (void*)&card->wandev.critical);
864 if(card->hw.type != SDLA_S514){
865 s508_unlock(card,&smp_flags);
866 }
867
868 return err;
869}
870
871
872/*============================================================================
873 * Reply to UDP Management system.
874 * Return length of reply.
875 */
876static int reply_udp( unsigned char *data, unsigned int mbox_len )
877{
878
879 unsigned short len, udp_length, temp, ip_length;
880 unsigned long ip_temp;
881 int even_bound = 0;
882 chdlc_udp_pkt_t *c_udp_pkt = (chdlc_udp_pkt_t *)data;
883
884 /* Set length of packet */
885 len = sizeof(ip_pkt_t)+
886 sizeof(udp_pkt_t)+
887 sizeof(wp_mgmt_t)+
888 sizeof(cblock_t)+
889 sizeof(trace_info_t)+
890 mbox_len;
891
892 /* fill in UDP reply */
893 c_udp_pkt->wp_mgmt.request_reply = UDPMGMT_REPLY;
894
895 /* fill in UDP length */
896 udp_length = sizeof(udp_pkt_t)+
897 sizeof(wp_mgmt_t)+
898 sizeof(cblock_t)+
899 sizeof(trace_info_t)+
900 mbox_len;
901
902 /* put it on an even boundary */
903 if ( udp_length & 0x0001 ) {
904 udp_length += 1;
905 len += 1;
906 even_bound = 1;
907 }
908
909 temp = (udp_length<<8)|(udp_length>>8);
910 c_udp_pkt->udp_pkt.udp_length = temp;
911
912 /* swap UDP ports */
913 temp = c_udp_pkt->udp_pkt.udp_src_port;
914 c_udp_pkt->udp_pkt.udp_src_port =
915 c_udp_pkt->udp_pkt.udp_dst_port;
916 c_udp_pkt->udp_pkt.udp_dst_port = temp;
917
918 /* add UDP pseudo header */
919	temp = 0x1100;	/* protocol 17 (UDP) for the pseudo-header, byte-swapped like the other fields in this routine */
920 *((unsigned short *)(c_udp_pkt->data+mbox_len+even_bound)) = temp;
921 temp = (udp_length<<8)|(udp_length>>8);
922 *((unsigned short *)(c_udp_pkt->data+mbox_len+even_bound+2)) = temp;
923
924
925 /* calculate UDP checksum */
926 c_udp_pkt->udp_pkt.udp_checksum = 0;
927 c_udp_pkt->udp_pkt.udp_checksum = calc_checksum(&data[UDP_OFFSET],udp_length+UDP_OFFSET);
928
929 /* fill in IP length */
930 ip_length = len;
931 temp = (ip_length<<8)|(ip_length>>8);
932 c_udp_pkt->ip_pkt.total_length = temp;
933
934 /* swap IP addresses */
935 ip_temp = c_udp_pkt->ip_pkt.ip_src_address;
936 c_udp_pkt->ip_pkt.ip_src_address = c_udp_pkt->ip_pkt.ip_dst_address;
937 c_udp_pkt->ip_pkt.ip_dst_address = ip_temp;
938
939 /* fill in IP checksum */
940 c_udp_pkt->ip_pkt.hdr_checksum = 0;
941 c_udp_pkt->ip_pkt.hdr_checksum = calc_checksum(data,sizeof(ip_pkt_t));
942
943 return len;
944
945} /* reply_udp */
946
947static unsigned short calc_checksum (char *data, int len)
948{
949 unsigned short temp;
950 unsigned long sum=0;
951 int i;
952
953 for( i = 0; i <len; i+=2 ) {
954 memcpy(&temp,&data[i],2);
955 sum += (unsigned long)temp;
956 }
957
958 while (sum >> 16 ) {
959 sum = (sum & 0xffffUL) + (sum >> 16);
960 }
961
962 temp = (unsigned short)sum;
963 temp = ~temp;
964
965 if( temp == 0 )
966 temp = 0xffff;
967
968 return temp;
969}
970
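/*
 * Editor's note: a small usage sketch (not part of this driver). The
 * function above is the usual one's-complement Internet checksum over
 * 16-bit words (read in host order), with a zero result remapped to
 * 0xffff; the callers in this file pad the buffer to an even length
 * first. One consequence is that re-running it over a buffer that
 * already carries the checksum it produced folds back to 0xffff, which
 * is how such checksums are normally verified.
 */
#if 0
	unsigned char hdr[4] = { 0x12, 0x34, 0x00, 0x00 };	/* checksum bytes zeroed */
	unsigned short csum;

	csum = calc_checksum((char *)hdr, sizeof(hdr));
	memcpy(&hdr[2], &csum, 2);	/* stored in host order, as it is read back */
	/* calc_checksum((char *)hdr, sizeof(hdr)) now returns 0xffff */
#endif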
971
972/*============================================================================
973 * Get ethernet-style interface statistics.
974 * Return a pointer to struct net_device_stats.
975 */
976static struct net_device_stats* if_stats(struct net_device* dev)
977{
978 sdla_t *my_card;
979 chdlc_private_area_t* chdlc_priv_area;
980
981	/* Shutdown bug fix. In del_if() we kill the
982	 * dev->priv pointer. This function can get
983	 * called after del_if(), so check whether
984	 * the pointer has been cleared. */
985 if ((chdlc_priv_area=dev->priv) == NULL)
986 return NULL;
987
988 my_card = chdlc_priv_area->card;
989 return &my_card->wandev.stats;
990}
991
992
993/****** Cisco HDLC Firmware Interface Functions *******************************/
994
995/*============================================================================
996 * Read firmware code version.
997 * Put code version as ASCII string in str.
998 */
999static int chdlc_read_version (sdla_t* card, char* str)
1000{
1001 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1002 int len;
1003 char err;
1004 mb->buffer_length = 0;
1005 mb->command = READ_CHDLC_CODE_VERSION;
1006 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1007
1008 if(err != COMMAND_OK) {
1009 chdlc_error(card,err,mb);
1010 }
1011 else if (str) { /* is not null */
1012 len = mb->buffer_length;
1013 memcpy(str, mb->data, len);
1014 str[len] = '\0';
1015 }
1016 return (err);
1017}
1018
1019/*-----------------------------------------------------------------------------
1020 * Configure CHDLC firmware.
1021 */
1022static int chdlc_configure (sdla_t* card, void* data)
1023{
1024 int err;
1025 CHDLC_MAILBOX_STRUCT *mailbox = card->mbox;
1026 int data_length = sizeof(CHDLC_CONFIGURATION_STRUCT);
1027
1028 mailbox->buffer_length = data_length;
1029 memcpy(mailbox->data, data, data_length);
1030 mailbox->command = SET_CHDLC_CONFIGURATION;
1031 err = sdla_exec(mailbox) ? mailbox->return_code : CMD_TIMEOUT;
1032
1033 if (err != COMMAND_OK) chdlc_error (card, err, mailbox);
1034
1035 return err;
1036}
1037
1038
1039/*============================================================================
1040 * Set interrupt mode -- HDLC Version.
1041 */
1042
1043static int chdlc_set_intr_mode (sdla_t* card, unsigned mode)
1044{
1045 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1046 CHDLC_INT_TRIGGERS_STRUCT* int_data =
1047 (CHDLC_INT_TRIGGERS_STRUCT *)mb->data;
1048 int err;
1049
1050 int_data->CHDLC_interrupt_triggers = mode;
1051 int_data->IRQ = card->hw.irq;
1052 int_data->interrupt_timer = 1;
1053
1054 mb->buffer_length = sizeof(CHDLC_INT_TRIGGERS_STRUCT);
1055 mb->command = SET_CHDLC_INTERRUPT_TRIGGERS;
1056 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1057 if (err != COMMAND_OK)
1058 chdlc_error (card, err, mb);
1059 return err;
1060}
1061
1062
1063/*============================================================================
1064 * Enable communications.
1065 */
1066
1067static int chdlc_comm_enable (sdla_t* card)
1068{
1069 int err;
1070 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1071
1072 mb->buffer_length = 0;
1073 mb->command = ENABLE_CHDLC_COMMUNICATIONS;
1074 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1075 if (err != COMMAND_OK)
1076 chdlc_error(card, err, mb);
1077 else
1078 card->u.c.comm_enabled=1;
1079
1080 return err;
1081}
1082
1083/*============================================================================
1084 * Disable communications and Drop the Modem lines (DCD and RTS).
1085 */
1086static int chdlc_comm_disable (sdla_t* card)
1087{
1088 int err;
1089 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1090
1091 mb->buffer_length = 0;
1092 mb->command = DISABLE_CHDLC_COMMUNICATIONS;
1093 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1094 if (err != COMMAND_OK)
1095 chdlc_error(card,err,mb);
1096
1097 return err;
1098}
1099
1100/*============================================================================
1101 * Read communication error statistics.
1102 */
1103static int chdlc_read_comm_err_stats (sdla_t* card)
1104{
1105 int err;
1106 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1107
1108 mb->buffer_length = 0;
1109 mb->command = READ_COMMS_ERROR_STATS;
1110 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1111 if (err != COMMAND_OK)
1112 chdlc_error(card,err,mb);
1113 return err;
1114}
1115
1116
1117/*============================================================================
1118 * Read CHDLC operational statistics.
1119 */
1120static int chdlc_read_op_stats (sdla_t* card)
1121{
1122 int err;
1123 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1124
1125 mb->buffer_length = 0;
1126 mb->command = READ_CHDLC_OPERATIONAL_STATS;
1127 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1128 if (err != COMMAND_OK)
1129 chdlc_error(card,err,mb);
1130 return err;
1131}
1132
1133
1134/*============================================================================
1135 * Update communications error and general packet statistics.
1136 */
1137static int update_comms_stats(sdla_t* card,
1138 chdlc_private_area_t* chdlc_priv_area)
1139{
1140 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1141 COMMS_ERROR_STATS_STRUCT* err_stats;
1142 CHDLC_OPERATIONAL_STATS_STRUCT *op_stats;
1143
1144 /* on the first timer interrupt, read the comms error statistics */
1145 if(chdlc_priv_area->update_comms_stats == 2) {
1146 if(chdlc_read_comm_err_stats(card))
1147 return 1;
1148 err_stats = (COMMS_ERROR_STATS_STRUCT *)mb->data;
1149 card->wandev.stats.rx_over_errors =
1150 err_stats->Rx_overrun_err_count;
1151 card->wandev.stats.rx_crc_errors =
1152 err_stats->CRC_err_count;
1153 card->wandev.stats.rx_frame_errors =
1154 err_stats->Rx_abort_count;
1155 card->wandev.stats.rx_fifo_errors =
1156 err_stats->Rx_dis_pri_bfrs_full_count;
1157 card->wandev.stats.rx_missed_errors =
1158 card->wandev.stats.rx_fifo_errors;
1159 card->wandev.stats.tx_aborted_errors =
1160 err_stats->sec_Tx_abort_count;
1161 }
1162
1163 /* on the second timer interrupt, read the operational statistics */
1164 else {
1165 if(chdlc_read_op_stats(card))
1166 return 1;
1167 op_stats = (CHDLC_OPERATIONAL_STATS_STRUCT *)mb->data;
1168 card->wandev.stats.rx_length_errors =
1169 (op_stats->Rx_Data_discard_short_count +
1170 op_stats->Rx_Data_discard_long_count);
1171 }
1172
1173 return 0;
1174}
1175
1176/*============================================================================
1177 * Send packet.
1178 * Return: 0 - o.k.
1179 * 1 - no transmit buffers available
1180 */
1181static int chdlc_send (sdla_t* card, void* data, unsigned len)
1182{
1183 CHDLC_DATA_TX_STATUS_EL_STRUCT *txbuf = card->u.c.txbuf;
1184
1185 if (txbuf->opp_flag)
1186 return 1;
1187
1188 sdla_poke(&card->hw, txbuf->ptr_data_bfr, data, len);
1189
1190 txbuf->frame_length = len;
1191 txbuf->opp_flag = 1; /* start transmission */
1192
1193 /* Update transmit buffer control fields */
1194 card->u.c.txbuf = ++txbuf;
1195
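	/* wrap back to the base of the circular Tx buffer ring after the last element */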
1196 if ((void*)txbuf > card->u.c.txbuf_last)
1197 card->u.c.txbuf = card->u.c.txbuf_base;
1198
1199 return 0;
1200}
1201
1202/****** Firmware Error Handler **********************************************/
1203
1204/*============================================================================
1205 * Firmware error handler.
1206 * This routine is called whenever a firmware command returns a non-zero
1207 * return code.
1208 *
1209 * Return zero if previous command has to be cancelled.
1210 */
1211static int chdlc_error (sdla_t *card, int err, CHDLC_MAILBOX_STRUCT *mb)
1212{
1213 unsigned cmd = mb->command;
1214
1215 switch (err) {
1216
1217 case CMD_TIMEOUT:
1218 printk(KERN_ERR "%s: command 0x%02X timed out!\n",
1219 card->devname, cmd);
1220 break;
1221
1222 case S514_BOTH_PORTS_SAME_CLK_MODE:
1223 if(cmd == SET_CHDLC_CONFIGURATION) {
1224 printk(KERN_INFO
1225 "%s: Configure both ports for the same clock source\n",
1226 card->devname);
1227 break;
1228 }
1229
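		/* fall through to the generic message for any other command */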
1230 default:
1231 printk(KERN_INFO "%s: command 0x%02X returned 0x%02X!\n",
1232 card->devname, cmd, err);
1233 }
1234
1235 return 0;
1236}
1237
1238/****** Interrupt Handlers **************************************************/
1239
1240/*============================================================================
1241 * Cisco HDLC interrupt service routine.
1242 */
1243STATIC void wsppp_isr (sdla_t* card)
1244{
1245 struct net_device* dev;
1246 SHARED_MEMORY_INFO_STRUCT* flags = NULL;
1247 int i;
1248 sdla_t *my_card;
1249
1250
1251 	/* Check which port the interrupt was generated for.
1252 	 * Since the secondary port piggybacks on the primary,
1253 	 * the check must be done here.
1254 */
1255
1256 flags = card->u.c.flags;
1257 if (!flags->interrupt_info_struct.interrupt_type){
1258 /* Check for a second port (piggybacking) */
1259 if((my_card = card->next)){
1260 flags = my_card->u.c.flags;
1261 if (flags->interrupt_info_struct.interrupt_type){
1262 card = my_card;
1263 card->isr(card);
1264 return;
1265 }
1266 }
1267 }
1268
1269 dev = card->wandev.dev;
1270 card->in_isr = 1;
1271 flags = card->u.c.flags;
1272
1273 	/* If we get an interrupt with no network device attached, ignore
1274 	 * everything except a command-complete interrupt */
1275 if ((!dev || !dev->priv) && flags->interrupt_info_struct.interrupt_type !=
1276 COMMAND_COMPLETE_APP_INT_PEND){
1277 goto isr_done;
1278 }
1279
1280
1281 /* if critical due to peripheral operations
1282 * ie. update() or getstats() then reset the interrupt and
1283 * wait for the board to retrigger.
1284 */
1285 if(test_bit(PERI_CRIT, (void*)&card->wandev.critical)) {
1286 flags->interrupt_info_struct.
1287 interrupt_type = 0;
1288 goto isr_done;
1289 }
1290
1291
1292 /* On a 508 Card, if critical due to if_send
1293 * Major Error !!!
1294 */
1295 if(card->hw.type != SDLA_S514) {
1296 if(test_bit(0, (void*)&card->wandev.critical)) {
1297 printk(KERN_INFO "%s: Critical while in ISR: %lx\n",
1298 card->devname, card->wandev.critical);
1299 goto isr_done;
1300 }
1301 }
1302
1303 switch(flags->interrupt_info_struct.interrupt_type) {
1304
1305 case RX_APP_INT_PEND: /* 0x01: receive interrupt */
1306 rx_intr(card);
1307 break;
1308
1309 case TX_APP_INT_PEND: /* 0x02: transmit interrupt */
1310 flags->interrupt_info_struct.interrupt_permission &=
1311 ~APP_INT_ON_TX_FRAME;
1312
1313 netif_wake_queue(dev);
1314 break;
1315
1316 case COMMAND_COMPLETE_APP_INT_PEND:/* 0x04: cmd cplt */
1317 ++ Intr_test_counter;
1318 break;
1319
1320 case CHDLC_EXCEP_COND_APP_INT_PEND: /* 0x20 */
1321 process_chdlc_exception(card);
1322 break;
1323
1324 case GLOBAL_EXCEP_COND_APP_INT_PEND:
1325 process_global_exception(card);
1326 break;
1327
1328 case TIMER_APP_INT_PEND:
1329 timer_intr(card);
1330 break;
1331
1332 default:
1333 printk(KERN_INFO "%s: spurious interrupt 0x%02X!\n",
1334 card->devname,
1335 flags->interrupt_info_struct.interrupt_type);
1336 printk(KERN_INFO "Code name: ");
1337 for(i = 0; i < 4; i ++)
1338 printk(KERN_INFO "%c",
1339 flags->global_info_struct.codename[i]);
1340 printk(KERN_INFO "\nCode version: ");
1341 for(i = 0; i < 4; i ++)
1342 printk(KERN_INFO "%c",
1343 flags->global_info_struct.codeversion[i]);
1344 printk(KERN_INFO "\n");
1345 break;
1346 }
1347
1348isr_done:
1349 card->in_isr = 0;
1350 flags->interrupt_info_struct.interrupt_type = 0;
1351}
1352
1353/*============================================================================
1354 * Receive interrupt handler.
1355 */
1356static void rx_intr (sdla_t* card)
1357{
1358 struct net_device *dev;
1359 chdlc_private_area_t *chdlc_priv_area;
1360 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
1361 CHDLC_DATA_RX_STATUS_EL_STRUCT *rxbuf = card->u.c.rxmb;
1362 struct sk_buff *skb;
1363 unsigned len;
1364 unsigned addr = rxbuf->ptr_data_bfr;
1365 void *buf;
1366 int i,udp_type;
1367
1368 if (rxbuf->opp_flag != 0x01) {
1369 printk(KERN_INFO
1370 "%s: corrupted Rx buffer @ 0x%X, flag = 0x%02X!\n",
1371 card->devname, (unsigned)rxbuf, rxbuf->opp_flag);
1372 printk(KERN_INFO "Code name: ");
1373 for(i = 0; i < 4; i ++)
1374 printk(KERN_INFO "%c",
1375 flags->global_info_struct.codename[i]);
1376 printk(KERN_INFO "\nCode version: ");
1377 for(i = 0; i < 4; i ++)
1378 printk(KERN_INFO "%c",
1379 flags->global_info_struct.codeversion[i]);
1380 printk(KERN_INFO "\n");
1381
1382
1383 /* Bug Fix: Mar 6 2000
1384 		 * If we get a corrupted mailbox, it means that the driver
1385 * is out of sync with the firmware. There is no recovery.
1386 * If we don't turn off all interrupts for this card
1387 * the machine will crash.
1388 */
1389 printk(KERN_INFO "%s: Critical router failure ...!!!\n", card->devname);
1390 printk(KERN_INFO "Please contact Sangoma Technologies !\n");
1391 chdlc_set_intr_mode(card,0);
1392 return;
1393 }
1394
1395 dev = card->wandev.dev;
1396
1397 if (!dev){
1398 goto rx_exit;
1399 }
1400
1401 if (!netif_running(dev)){
1402 goto rx_exit;
1403 }
1404
1405 chdlc_priv_area = dev->priv;
1406
1407 if (rxbuf->error_flag){
1408 goto rx_exit;
1409 }
1410 /* Take off two CRC bytes */
1411
1412 if (rxbuf->frame_length < 7 || rxbuf->frame_length > 1506 ){
1413 goto rx_exit;
1414 }
1415
1416 len = rxbuf->frame_length - CRC_LENGTH;
1417
1418 /* Allocate socket buffer */
1419 skb = dev_alloc_skb(len);
1420
1421 if (skb == NULL) {
1422 if (net_ratelimit()){
1423 printk(KERN_INFO "%s: no socket buffers available!\n",
1424 card->devname);
1425 }
1426 ++card->wandev.stats.rx_dropped;
1427 goto rx_exit;
1428 }
1429
1430 /* Copy data to the socket buffer */
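	/* the frame may wrap past the top of the on-board Rx buffer; copy the tail first, then the rest from the base */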
1431 if((addr + len) > card->u.c.rx_top + 1) {
1432 unsigned tmp = card->u.c.rx_top - addr + 1;
1433 buf = skb_put(skb, tmp);
1434 sdla_peek(&card->hw, addr, buf, tmp);
1435 addr = card->u.c.rx_base;
1436 len -= tmp;
1437 }
1438
1439 buf = skb_put(skb, len);
1440 sdla_peek(&card->hw, addr, buf, len);
1441
1442 skb->protocol = htons(ETH_P_WAN_PPP);
1443
1444 card->wandev.stats.rx_packets ++;
1445 card->wandev.stats.rx_bytes += skb->len;
1446 udp_type = udp_pkt_type( skb, card );
1447
1448 if(udp_type == UDP_CPIPE_TYPE) {
1449 if(store_udp_mgmt_pkt(UDP_PKT_FRM_NETWORK,
1450 card, skb, dev, chdlc_priv_area)) {
1451 flags->interrupt_info_struct.
1452 interrupt_permission |=
1453 APP_INT_ON_TIMER;
1454 }
1455 }else{
1456 /* Pass it up the protocol stack */
1457 skb->dev = dev;
1458 skb->mac.raw = skb->data;
1459 netif_rx(skb);
1460 dev->last_rx = jiffies;
1461 }
1462
1463rx_exit:
1464 /* Release buffer element and calculate a pointer to the next one */
1465 rxbuf->opp_flag = 0x00;
1466 card->u.c.rxmb = ++ rxbuf;
1467 if((void*)rxbuf > card->u.c.rxbuf_last){
1468 card->u.c.rxmb = card->u.c.rxbuf_base;
1469 }
1470}
1471
1472/*============================================================================
1473 * Timer interrupt handler.
1474 * The timer interrupt is used for two purposes:
1475 * 1) Processing udp calls from 'cpipemon'.
1476 * 2) Reading board-level statistics for updating the proc file system.
1477 */
1478void timer_intr(sdla_t *card)
1479{
1480 struct net_device* dev;
1481 chdlc_private_area_t* chdlc_priv_area = NULL;
1482 SHARED_MEMORY_INFO_STRUCT* flags = NULL;
1483
1484 dev = card->wandev.dev;
1485 chdlc_priv_area = dev->priv;
1486
1487 if (chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_CONFIG) {
1488 if (!config_chdlc(card)){
1489 chdlc_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_CONFIG;
1490 }
1491 }
1492
1493 /* process a udp call if pending */
1494 if(chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_UDP) {
1495 process_udp_mgmt_pkt(card, dev,
1496 chdlc_priv_area);
1497 chdlc_priv_area->timer_int_enabled &= ~TMR_INT_ENABLED_UDP;
1498 }
1499
1500
1501 /* read the communications statistics if required */
1502 if(chdlc_priv_area->timer_int_enabled & TMR_INT_ENABLED_UPDATE) {
1503 update_comms_stats(card, chdlc_priv_area);
1504 if(!(-- chdlc_priv_area->update_comms_stats)) {
1505 chdlc_priv_area->timer_int_enabled &=
1506 ~TMR_INT_ENABLED_UPDATE;
1507 }
1508 }
1509
1510 /* only disable the timer interrupt if there are no udp or statistic */
1511 /* updates pending */
1512 if(!chdlc_priv_area->timer_int_enabled) {
1513 flags = card->u.c.flags;
1514 flags->interrupt_info_struct.interrupt_permission &=
1515 ~APP_INT_ON_TIMER;
1516 }
1517}
1518
1519/*------------------------------------------------------------------------------
1520 Miscellaneous Functions
1521 - set_chdlc_config() used to set configuration options on the board
1522------------------------------------------------------------------------------*/
1523
1524static int set_chdlc_config(sdla_t* card)
1525{
1526
1527 CHDLC_CONFIGURATION_STRUCT cfg;
1528
1529 memset(&cfg, 0, sizeof(CHDLC_CONFIGURATION_STRUCT));
1530
1531 if(card->wandev.clocking)
1532 cfg.baud_rate = card->wandev.bps;
1533
1534 cfg.line_config_options = (card->wandev.interface == WANOPT_RS232) ?
1535 INTERFACE_LEVEL_RS232 : INTERFACE_LEVEL_V35;
1536
1537 cfg.modem_config_options = 0;
1538 //API OPTIONS
1539 cfg.CHDLC_API_options = DISCARD_RX_ERROR_FRAMES;
1540 cfg.modem_status_timer = 100;
1541 cfg.CHDLC_protocol_options = HDLC_STREAMING_MODE;
1542 cfg.percent_data_buffer_for_Tx = 50;
1543 cfg.CHDLC_statistics_options = (CHDLC_TX_DATA_BYTE_COUNT_STAT |
1544 CHDLC_RX_DATA_BYTE_COUNT_STAT);
1545 cfg.max_CHDLC_data_field_length = card->wandev.mtu;
1546
1547 cfg.transmit_keepalive_timer = 0;
1548 cfg.receive_keepalive_timer = 0;
1549 cfg.keepalive_error_tolerance = 0;
1550 cfg.SLARP_request_timer = 0;
1551
1552 cfg.IP_address = 0;
1553 cfg.IP_netmask = 0;
1554
1555 return chdlc_configure(card, &cfg);
1556}
1557
1558/*============================================================================
1559 * Process global exception condition
1560 */
1561static int process_global_exception(sdla_t *card)
1562{
1563 CHDLC_MAILBOX_STRUCT* mbox = card->mbox;
1564 int err;
1565
1566 mbox->buffer_length = 0;
1567 mbox->command = READ_GLOBAL_EXCEPTION_CONDITION;
1568 err = sdla_exec(mbox) ? mbox->return_code : CMD_TIMEOUT;
1569
1570 if(err != CMD_TIMEOUT ){
1571
1572 switch(mbox->return_code) {
1573
1574 case EXCEP_MODEM_STATUS_CHANGE:
1575
1576 printk(KERN_INFO "%s: Modem status change\n",
1577 card->devname);
1578
1579 switch(mbox->data[0] & (DCD_HIGH | CTS_HIGH)) {
1580 case (DCD_HIGH):
1581 printk(KERN_INFO "%s: DCD high, CTS low\n",card->devname);
1582 break;
1583 case (CTS_HIGH):
1584 printk(KERN_INFO "%s: DCD low, CTS high\n",card->devname);
1585 break;
1586 case ((DCD_HIGH | CTS_HIGH)):
1587 printk(KERN_INFO "%s: DCD high, CTS high\n",card->devname);
1588 break;
1589 default:
1590 printk(KERN_INFO "%s: DCD low, CTS low\n",card->devname);
1591 break;
1592 }
1593
1594 			if (!(mbox->data[0] & DCD_HIGH) || !(mbox->data[0] & CTS_HIGH)){
1595 //printk(KERN_INFO "Sending TERM Request Manually !\n");
1596 send_ppp_term_request(card->wandev.dev);
1597 }
1598 break;
1599
1600 case EXCEP_TRC_DISABLED:
1601 printk(KERN_INFO "%s: Line trace disabled\n",
1602 card->devname);
1603 break;
1604
1605 case EXCEP_IRQ_TIMEOUT:
1606 printk(KERN_INFO "%s: IRQ timeout occurred\n",
1607 card->devname);
1608 break;
1609
1610 default:
1611 printk(KERN_INFO "%s: Global exception %x\n",
1612 card->devname, mbox->return_code);
1613 break;
1614 }
1615 }
1616 return 0;
1617}
1618
1619
1620/*============================================================================
1621 * Process chdlc exception condition
1622 */
1623static int process_chdlc_exception(sdla_t *card)
1624{
1625 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
1626 int err;
1627
1628 mb->buffer_length = 0;
1629 mb->command = READ_CHDLC_EXCEPTION_CONDITION;
1630 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1631 if(err != CMD_TIMEOUT) {
1632
1633 switch (err) {
1634
1635 case EXCEP_LINK_ACTIVE:
1636 port_set_state(card, WAN_CONNECTED);
1637 break;
1638
1639 case EXCEP_LINK_INACTIVE_MODEM:
1640 port_set_state(card, WAN_DISCONNECTED);
1641 break;
1642
1643 case EXCEP_LOOPBACK_CONDITION:
1644 printk(KERN_INFO "%s: Loopback Condition Detected.\n",
1645 card->devname);
1646 break;
1647
1648 case NO_CHDLC_EXCEP_COND_TO_REPORT:
1649 printk(KERN_INFO "%s: No exceptions reported.\n",
1650 card->devname);
1651 break;
1652 default:
1653 printk(KERN_INFO "%s: Exception Condition %x!\n",
1654 card->devname,err);
1655 break;
1656 }
1657
1658 }
1659 return 0;
1660}
1661
1662
1663/*=============================================================================
1664 * Store a UDP management packet for later processing.
1665 */
1666
1667static int store_udp_mgmt_pkt(char udp_pkt_src, sdla_t* card,
1668 struct sk_buff *skb, struct net_device* dev,
1669 chdlc_private_area_t* chdlc_priv_area )
1670{
1671 int udp_pkt_stored = 0;
1672
1673 if(!chdlc_priv_area->udp_pkt_lgth &&
1674 (skb->len <= MAX_LGTH_UDP_MGNT_PKT)) {
1675 chdlc_priv_area->udp_pkt_lgth = skb->len;
1676 chdlc_priv_area->udp_pkt_src = udp_pkt_src;
1677 memcpy(chdlc_priv_area->udp_pkt_data, skb->data, skb->len);
1678 chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UDP;
1679 udp_pkt_stored = 1;
1680 }
1681
1682 	/* whether the packet came from the stack or the network it has been
1683 	 * copied (or dropped) above, so free it in either case */
1684 	dev_kfree_skb_any(skb);
1686
1687 return(udp_pkt_stored);
1688}
1689
1690
1691/*=============================================================================
1692 * Process UDP management packet.
1693 */
1694
1695static int process_udp_mgmt_pkt(sdla_t* card, struct net_device* dev,
1696 chdlc_private_area_t* chdlc_priv_area )
1697{
1698 unsigned char *buf;
1699 unsigned int frames, len;
1700 struct sk_buff *new_skb;
1701 unsigned short buffer_length, real_len;
1702 unsigned long data_ptr;
1703 unsigned data_length;
1704 int udp_mgmt_req_valid = 1;
1705 CHDLC_MAILBOX_STRUCT *mb = card->mbox;
1706 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
1707 chdlc_udp_pkt_t *chdlc_udp_pkt;
1708 struct timeval tv;
1709 int err;
1710 char ut_char;
1711
1712 chdlc_udp_pkt = (chdlc_udp_pkt_t *) chdlc_priv_area->udp_pkt_data;
1713
1714 if(chdlc_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK) {
1715
1716 switch(chdlc_udp_pkt->cblock.command) {
1717 case READ_GLOBAL_STATISTICS:
1718 case READ_MODEM_STATUS:
1719 case READ_CHDLC_LINK_STATUS:
1720 case CPIPE_ROUTER_UP_TIME:
1721 case READ_COMMS_ERROR_STATS:
1722 case READ_CHDLC_OPERATIONAL_STATS:
1723
1724 /* These two commands are executed for
1725 * each request */
1726 case READ_CHDLC_CONFIGURATION:
1727 case READ_CHDLC_CODE_VERSION:
1728 udp_mgmt_req_valid = 1;
1729 break;
1730 default:
1731 udp_mgmt_req_valid = 0;
1732 break;
1733 }
1734 }
1735
1736 if(!udp_mgmt_req_valid) {
1737
1738 /* set length to 0 */
1739 chdlc_udp_pkt->cblock.buffer_length = 0;
1740
1741 /* set return code */
1742 chdlc_udp_pkt->cblock.return_code = 0xCD;
1743
1744 if (net_ratelimit()){
1745 printk(KERN_INFO
1746 "%s: Warning, Illegal UDP command attempted from network: %x\n",
1747 card->devname,chdlc_udp_pkt->cblock.command);
1748 }
1749
1750 } else {
1751 unsigned long trace_status_cfg_addr = 0;
1752 TRACE_STATUS_EL_CFG_STRUCT trace_cfg_struct;
1753 TRACE_STATUS_ELEMENT_STRUCT trace_element_struct;
1754
1755 switch(chdlc_udp_pkt->cblock.command) {
1756
1757 case CPIPE_ENABLE_TRACING:
1758 if (!chdlc_priv_area->TracingEnabled) {
1759
1760 /* OPERATE_DATALINE_MONITOR */
1761
1762 mb->buffer_length = sizeof(LINE_TRACE_CONFIG_STRUCT);
1763 mb->command = SET_TRACE_CONFIGURATION;
1764
1765 ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
1766 trace_config = TRACE_ACTIVE;
1767 /* Trace delay mode is not used because it slows
1768 down transfer and results in a standoff situation
1769 when there is a lot of data */
1770
1771 /* Configure the Trace based on user inputs */
1772 ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->trace_config |=
1773 chdlc_udp_pkt->data[0];
1774
1775 ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
1776 trace_deactivation_timer = 4000;
1777
1778
1779 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1780 if (err != COMMAND_OK) {
1781 chdlc_error(card,err,mb);
1782 card->TracingEnabled = 0;
1783 chdlc_udp_pkt->cblock.return_code = err;
1784 mb->buffer_length = 0;
1785 break;
1786 }
1787
1788 /* Get the base address of the trace element list */
1789 mb->buffer_length = 0;
1790 mb->command = READ_TRACE_CONFIGURATION;
1791 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1792
1793 if (err != COMMAND_OK) {
1794 chdlc_error(card,err,mb);
1795 chdlc_priv_area->TracingEnabled = 0;
1796 chdlc_udp_pkt->cblock.return_code = err;
1797 mb->buffer_length = 0;
1798 break;
1799 }
1800
1801 trace_status_cfg_addr =((LINE_TRACE_CONFIG_STRUCT *)
1802 mb->data) -> ptr_trace_stat_el_cfg_struct;
1803
1804 sdla_peek(&card->hw, trace_status_cfg_addr,
1805 &trace_cfg_struct, sizeof(trace_cfg_struct));
1806
1807 chdlc_priv_area->start_trace_addr = trace_cfg_struct.
1808 base_addr_trace_status_elements;
1809
1810 chdlc_priv_area->number_trace_elements =
1811 trace_cfg_struct.number_trace_status_elements;
1812
1813 chdlc_priv_area->end_trace_addr = (unsigned long)
1814 ((TRACE_STATUS_ELEMENT_STRUCT *)
1815 chdlc_priv_area->start_trace_addr +
1816 (chdlc_priv_area->number_trace_elements - 1));
1817
1818 chdlc_priv_area->base_addr_trace_buffer =
1819 trace_cfg_struct.base_addr_trace_buffer;
1820
1821 chdlc_priv_area->end_addr_trace_buffer =
1822 trace_cfg_struct.end_addr_trace_buffer;
1823
1824 chdlc_priv_area->curr_trace_addr =
1825 trace_cfg_struct.next_trace_element_to_use;
1826
1827 chdlc_priv_area->available_buffer_space = 2000 -
1828 sizeof(ip_pkt_t) -
1829 sizeof(udp_pkt_t) -
1830 sizeof(wp_mgmt_t) -
1831 sizeof(cblock_t) -
1832 sizeof(trace_info_t);
1833 }
1834 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
1835 mb->buffer_length = 0;
1836 chdlc_priv_area->TracingEnabled = 1;
1837 break;
1838
1839
1840 case CPIPE_DISABLE_TRACING:
1841 if (chdlc_priv_area->TracingEnabled) {
1842
1843 /* OPERATE_DATALINE_MONITOR */
1844 mb->buffer_length = sizeof(LINE_TRACE_CONFIG_STRUCT);
1845 mb->command = SET_TRACE_CONFIGURATION;
1846 ((LINE_TRACE_CONFIG_STRUCT *)mb->data)->
1847 trace_config = TRACE_INACTIVE;
1848 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
1849 }
1850
1851 chdlc_priv_area->TracingEnabled = 0;
1852 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
1853 mb->buffer_length = 0;
1854 break;
1855
1856
1857 case CPIPE_GET_TRACE_INFO:
1858
1859 if (!chdlc_priv_area->TracingEnabled) {
1860 chdlc_udp_pkt->cblock.return_code = 1;
1861 mb->buffer_length = 0;
1862 break;
1863 }
1864
1865 chdlc_udp_pkt->trace_info.ismoredata = 0x00;
1866 			buffer_length = 0;	/* bytes of the reply data area used so far */
1867
1868 for (frames=0; frames < chdlc_priv_area->number_trace_elements; frames++){
1869
1870 trace_pkt_t *trace_pkt = (trace_pkt_t *)
1871 &chdlc_udp_pkt->data[buffer_length];
1872
1873 sdla_peek(&card->hw, chdlc_priv_area->curr_trace_addr,
1874 (unsigned char *)&trace_element_struct,
1875 sizeof(TRACE_STATUS_ELEMENT_STRUCT));
1876
1877 if (trace_element_struct.opp_flag == 0x00) {
1878 break;
1879 }
1880
1881 /* get pointer to real data */
1882 data_ptr = trace_element_struct.ptr_data_bfr;
1883
1884 /* See if there is actual data on the trace buffer */
1885 if (data_ptr){
1886 data_length = trace_element_struct.trace_length;
1887 }else{
1888 data_length = 0;
1889 chdlc_udp_pkt->trace_info.ismoredata = 0x01;
1890 }
1891
1892 if( (chdlc_priv_area->available_buffer_space - buffer_length)
1893 < ( sizeof(trace_pkt_t) + data_length) ) {
1894
1895 /* indicate there are more frames on board & exit */
1896 chdlc_udp_pkt->trace_info.ismoredata = 0x01;
1897 break;
1898 }
1899
1900 trace_pkt->status = trace_element_struct.trace_type;
1901
1902 trace_pkt->time_stamp =
1903 trace_element_struct.trace_time_stamp;
1904
1905 trace_pkt->real_length =
1906 trace_element_struct.trace_length;
1907
1908 /* see if we can fit the frame into the user buffer */
1909 real_len = trace_pkt->real_length;
1910
1911 if (data_ptr == 0) {
1912 trace_pkt->data_avail = 0x00;
1913 } else {
1914 unsigned tmp = 0;
1915
1916 /* get the data from circular buffer
1917 must check for end of buffer */
1918 trace_pkt->data_avail = 0x01;
1919
1920 if ((data_ptr + real_len) >
1921 chdlc_priv_area->end_addr_trace_buffer + 1){
1922
1923 tmp = chdlc_priv_area->end_addr_trace_buffer - data_ptr + 1;
1924 sdla_peek(&card->hw, data_ptr,
1925 trace_pkt->data,tmp);
1926 data_ptr = chdlc_priv_area->base_addr_trace_buffer;
1927 }
1928
1929 sdla_peek(&card->hw, data_ptr,
1930 &trace_pkt->data[tmp], real_len - tmp);
1931 }
1932
1933 /* zero the opp flag to show we got the frame */
1934 ut_char = 0x00;
1935 sdla_poke(&card->hw, chdlc_priv_area->curr_trace_addr, &ut_char, 1);
1936
1937 /* now move onto the next frame */
1938 chdlc_priv_area->curr_trace_addr += sizeof(TRACE_STATUS_ELEMENT_STRUCT);
1939
1940 /* check if we went over the last address */
1941 if ( chdlc_priv_area->curr_trace_addr > chdlc_priv_area->end_trace_addr ) {
1942 chdlc_priv_area->curr_trace_addr = chdlc_priv_area->start_trace_addr;
1943 }
1944
1945 if(trace_pkt->data_avail == 0x01) {
1946 buffer_length += real_len - 1;
1947 }
1948
1949 /* for the header */
1950 buffer_length += sizeof(trace_pkt_t);
1951
1952 } /* For Loop */
1953
1954 if (frames == chdlc_priv_area->number_trace_elements){
1955 chdlc_udp_pkt->trace_info.ismoredata = 0x01;
1956 }
1957 chdlc_udp_pkt->trace_info.num_frames = frames;
1958
1959 mb->buffer_length = buffer_length;
1960 chdlc_udp_pkt->cblock.buffer_length = buffer_length;
1961
1962 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
1963
1964 break;
1965
1966
1967 case CPIPE_FT1_READ_STATUS:
1968 ((unsigned char *)chdlc_udp_pkt->data )[0] =
1969 flags->FT1_info_struct.parallel_port_A_input;
1970
1971 ((unsigned char *)chdlc_udp_pkt->data )[1] =
1972 flags->FT1_info_struct.parallel_port_B_input;
1973
1974 chdlc_udp_pkt->cblock.return_code = COMMAND_OK;
1975 mb->buffer_length = 2;
1976 break;
1977
1978 case CPIPE_ROUTER_UP_TIME:
1979 do_gettimeofday( &tv );
1980 chdlc_priv_area->router_up_time = tv.tv_sec -
1981 chdlc_priv_area->router_start_time;
1982 *(unsigned long *)&chdlc_udp_pkt->data =
1983 chdlc_priv_area->router_up_time;
1984 mb->buffer_length = sizeof(unsigned long);
1985 break;
1986
1987 case FT1_MONITOR_STATUS_CTRL:
1988 /* Enable FT1 MONITOR STATUS */
1989 if ((chdlc_udp_pkt->data[0] & ENABLE_READ_FT1_STATUS) ||
1990 (chdlc_udp_pkt->data[0] & ENABLE_READ_FT1_OP_STATS)) {
1991
1992 if( rCount++ != 0 ) {
1993 chdlc_udp_pkt->cblock.
1994 return_code = COMMAND_OK;
1995 mb->buffer_length = 1;
1996 break;
1997 }
1998 }
1999
2000 /* Disable FT1 MONITOR STATUS */
2001 if( chdlc_udp_pkt->data[0] == 0) {
2002
2003 if( --rCount != 0) {
2004 chdlc_udp_pkt->cblock.
2005 return_code = COMMAND_OK;
2006 mb->buffer_length = 1;
2007 break;
2008 }
2009 }
2010
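		/* no break above: fall through so the command is also passed to the board */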
2011 default:
2012 /* it's a board command */
2013 mb->command = chdlc_udp_pkt->cblock.command;
2014 mb->buffer_length = chdlc_udp_pkt->cblock.buffer_length;
2015 if (mb->buffer_length) {
2016 memcpy(&mb->data, (unsigned char *) chdlc_udp_pkt->
2017 data, mb->buffer_length);
2018 }
2019 /* run the command on the board */
2020 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2021 if (err != COMMAND_OK) {
2022 break;
2023 }
2024
2025 /* copy the result back to our buffer */
2026 memcpy(&chdlc_udp_pkt->cblock, mb, sizeof(cblock_t));
2027
2028 if (mb->buffer_length) {
2029 memcpy(&chdlc_udp_pkt->data, &mb->data,
2030 mb->buffer_length);
2031 }
2032
2033 } /* end of switch */
2034 } /* end of else */
2035
2036 /* Fill UDP TTL */
2037 chdlc_udp_pkt->ip_pkt.ttl = card->wandev.ttl;
2038
2039 len = reply_udp(chdlc_priv_area->udp_pkt_data, mb->buffer_length);
2040
2041 if(chdlc_priv_area->udp_pkt_src == UDP_PKT_FRM_NETWORK) {
2042 if(!chdlc_send(card, chdlc_priv_area->udp_pkt_data, len)) {
2043 ++ card->wandev.stats.tx_packets;
2044 card->wandev.stats.tx_bytes += len;
2045 }
2046 } else {
2047
2048 /* Pass it up the stack
2049 Allocate socket buffer */
2050 if ((new_skb = dev_alloc_skb(len)) != NULL) {
2051 /* copy data into new_skb */
2052
2053 buf = skb_put(new_skb, len);
2054 memcpy(buf, chdlc_priv_area->udp_pkt_data, len);
2055
2056 /* Decapsulate pkt and pass it up the protocol stack */
2057 new_skb->protocol = htons(ETH_P_IP);
2058 new_skb->dev = dev;
2059 new_skb->mac.raw = new_skb->data;
2060
2061 netif_rx(new_skb);
2062 dev->last_rx = jiffies;
2063 } else {
2064
2065 printk(KERN_INFO "%s: no socket buffers available!\n",
2066 card->devname);
2067 }
2068 }
2069
2070 chdlc_priv_area->udp_pkt_lgth = 0;
2071
2072 return 0;
2073}
2074
2075/*============================================================================
2076 * Initialize Receive and Transmit Buffers.
2077 */
2078
2079static void init_chdlc_tx_rx_buff(sdla_t* card, struct net_device *dev)
2080{
2081 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
2082 CHDLC_TX_STATUS_EL_CFG_STRUCT *tx_config;
2083 CHDLC_RX_STATUS_EL_CFG_STRUCT *rx_config;
2084 char err;
2085
2086 mb->buffer_length = 0;
2087 mb->command = READ_CHDLC_CONFIGURATION;
2088 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2089
2090 if(err != COMMAND_OK) {
2091 chdlc_error(card,err,mb);
2092 return;
2093 }
2094
2095 if(card->hw.type == SDLA_S514) {
2096 tx_config = (CHDLC_TX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
2097 (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
2098 ptr_CHDLC_Tx_stat_el_cfg_struct));
2099 rx_config = (CHDLC_RX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
2100 (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
2101 ptr_CHDLC_Rx_stat_el_cfg_struct));
2102
2103 /* Setup Head and Tails for buffers */
2104 card->u.c.txbuf_base = (void *)(card->hw.dpmbase +
2105 tx_config->base_addr_Tx_status_elements);
2106 card->u.c.txbuf_last =
2107 (CHDLC_DATA_TX_STATUS_EL_STRUCT *)
2108 card->u.c.txbuf_base +
2109 (tx_config->number_Tx_status_elements - 1);
2110
2111 card->u.c.rxbuf_base = (void *)(card->hw.dpmbase +
2112 rx_config->base_addr_Rx_status_elements);
2113 card->u.c.rxbuf_last =
2114 (CHDLC_DATA_RX_STATUS_EL_STRUCT *)
2115 card->u.c.rxbuf_base +
2116 (rx_config->number_Rx_status_elements - 1);
2117
2118 /* Set up next pointer to be used */
2119 card->u.c.txbuf = (void *)(card->hw.dpmbase +
2120 tx_config->next_Tx_status_element_to_use);
2121 card->u.c.rxmb = (void *)(card->hw.dpmbase +
2122 rx_config->next_Rx_status_element_to_use);
2123 }
2124 else {
2125 tx_config = (CHDLC_TX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
2126 (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
2127 ptr_CHDLC_Tx_stat_el_cfg_struct % SDLA_WINDOWSIZE));
2128
2129 rx_config = (CHDLC_RX_STATUS_EL_CFG_STRUCT *)(card->hw.dpmbase +
2130 (((CHDLC_CONFIGURATION_STRUCT *)mb->data)->
2131 ptr_CHDLC_Rx_stat_el_cfg_struct % SDLA_WINDOWSIZE));
2132
2133 /* Setup Head and Tails for buffers */
2134 card->u.c.txbuf_base = (void *)(card->hw.dpmbase +
2135 (tx_config->base_addr_Tx_status_elements % SDLA_WINDOWSIZE));
2136 card->u.c.txbuf_last =
2137 (CHDLC_DATA_TX_STATUS_EL_STRUCT *)card->u.c.txbuf_base
2138 + (tx_config->number_Tx_status_elements - 1);
2139 card->u.c.rxbuf_base = (void *)(card->hw.dpmbase +
2140 (rx_config->base_addr_Rx_status_elements % SDLA_WINDOWSIZE));
2141 card->u.c.rxbuf_last =
2142 (CHDLC_DATA_RX_STATUS_EL_STRUCT *)card->u.c.rxbuf_base
2143 + (rx_config->number_Rx_status_elements - 1);
2144
2145 /* Set up next pointer to be used */
2146 card->u.c.txbuf = (void *)(card->hw.dpmbase +
2147 (tx_config->next_Tx_status_element_to_use % SDLA_WINDOWSIZE));
2148 card->u.c.rxmb = (void *)(card->hw.dpmbase +
2149 (rx_config->next_Rx_status_element_to_use % SDLA_WINDOWSIZE));
2150 }
2151
2152 /* Setup Actual Buffer Start and end addresses */
2153 card->u.c.rx_base = rx_config->base_addr_Rx_buffer;
2154 card->u.c.rx_top = rx_config->end_addr_Rx_buffer;
2155
2156}
2157
2158/*=============================================================================
2159 * Perform Interrupt Test by running READ_CHDLC_CODE_VERSION command MAX_INTR
2160 * _TEST_COUNTER times.
2161 */
2162static int intr_test( sdla_t* card)
2163{
2164 CHDLC_MAILBOX_STRUCT* mb = card->mbox;
2165 int err,i;
2166
2167 Intr_test_counter = 0;
2168
2169 	/* The critical flag is unset because during initialization (if_open)
2170 	 * we want the interrupts to be enabled, so that when the ISR is
2171 	 * called it does not exit because the critical flag is set.
2172 	 */
2173
2174 err = chdlc_set_intr_mode(card, APP_INT_ON_COMMAND_COMPLETE);
2175
2176 if (err == CMD_OK) {
2177 for (i = 0; i < MAX_INTR_TEST_COUNTER; i ++) {
2178 mb->buffer_length = 0;
2179 mb->command = READ_CHDLC_CODE_VERSION;
2180 err = sdla_exec(mb) ? mb->return_code : CMD_TIMEOUT;
2181 }
2182 }
2183 else {
2184 return err;
2185 }
2186
2187 err = chdlc_set_intr_mode(card, 0);
2188
2189 if (err != CMD_OK)
2190 return err;
2191
2192 return 0;
2193}
2194
2195/*==============================================================================
2196 * Determine what type of UDP call it is. CPIPEAB ?
2197 */
2198static int udp_pkt_type(struct sk_buff *skb, sdla_t* card)
2199{
2200 chdlc_udp_pkt_t *chdlc_udp_pkt = (chdlc_udp_pkt_t *)skb->data;
2201
2202 if (!strncmp(chdlc_udp_pkt->wp_mgmt.signature,UDPMGMT_SIGNATURE,8) &&
2203 (chdlc_udp_pkt->udp_pkt.udp_dst_port == ntohs(card->wandev.udp_port)) &&
2204 (chdlc_udp_pkt->ip_pkt.protocol == UDPMGMT_UDP_PROTOCOL) &&
2205 (chdlc_udp_pkt->wp_mgmt.request_reply == UDPMGMT_REQUEST)) {
2206 return UDP_CPIPE_TYPE;
2207 }
2208 else return UDP_INVALID_TYPE;
2209}
2210
2211/*============================================================================
2212 * Set PORT state.
2213 */
2214static void port_set_state (sdla_t *card, int state)
2215{
2216 struct net_device *dev = card->wandev.dev;
2217 chdlc_private_area_t *chdlc_priv_area = dev->priv;
2218
2219 if (card->u.c.state != state)
2220 {
2221 switch (state)
2222 {
2223 case WAN_CONNECTED:
2224 printk (KERN_INFO "%s: HDLC link connected!\n",
2225 card->devname);
2226 break;
2227
2228 case WAN_CONNECTING:
2229 printk (KERN_INFO "%s: HDLC link connecting...\n",
2230 card->devname);
2231 break;
2232
2233 case WAN_DISCONNECTED:
2234 printk (KERN_INFO "%s: HDLC link disconnected!\n",
2235 card->devname);
2236 break;
2237 }
2238
2239 card->wandev.state = card->u.c.state = state;
2240 chdlc_priv_area->common.state = state;
2241 }
2242}
2243
2244void s508_lock (sdla_t *card, unsigned long *smp_flags)
2245{
2246 spin_lock_irqsave(&card->wandev.lock, *smp_flags);
2247 if (card->next){
2248 /* It is ok to use spin_lock here, since we
2249 * already turned off interrupts */
2250 spin_lock(&card->next->wandev.lock);
2251 }
2252}
2253
2254void s508_unlock (sdla_t *card, unsigned long *smp_flags)
2255{
2256 if (card->next){
2257 spin_unlock(&card->next->wandev.lock);
2258 }
2259 spin_unlock_irqrestore(&card->wandev.lock, *smp_flags);
2260}
2261
2262
2263
2264/*===========================================================================
2265 * config_chdlc
2266 *
2267 * Configure the chdlc protocol and enable communications.
2268 *
2269 * The if_open() function binds this function to the poll routine.
2270 * Therefore, this function will run every time the chdlc interface
2271 * is brought up. We cannot run this function from the if_open
2272 * because if_open does not have access to the remote IP address.
2273 *
2274 * If the communications are not enabled, proceed to configure
2275 * the card and enable communications.
2276 *
2277 * If the communications are enabled, it means that the interface
2278 * was shut down by either the user or the driver. In this case, we
2279 * have to check that the IP addresses have not changed. If
2280 * the IP addresses have changed, we have to reconfigure the firmware
2281 * and update the changed IP addresses. Otherwise, just exit.
2282 *
2283 */
2284
2285static int config_chdlc (sdla_t *card)
2286{
2287 struct net_device *dev = card->wandev.dev;
2288 SHARED_MEMORY_INFO_STRUCT *flags = card->u.c.flags;
2289
2290 if (card->u.c.comm_enabled){
2291 chdlc_comm_disable(card);
2292 port_set_state(card, WAN_DISCONNECTED);
2293 }
2294
2295 if (set_chdlc_config(card)) {
2296 printk(KERN_INFO "%s: CHDLC Configuration Failed!\n",
2297 card->devname);
2298 return 0;
2299 }
2300 init_chdlc_tx_rx_buff(card, dev);
2301
2302 /* Set interrupt mode and mask */
2303 if (chdlc_set_intr_mode(card, APP_INT_ON_RX_FRAME |
2304 APP_INT_ON_GLOBAL_EXCEP_COND |
2305 APP_INT_ON_TX_FRAME |
2306 APP_INT_ON_CHDLC_EXCEP_COND | APP_INT_ON_TIMER)){
2307 printk (KERN_INFO "%s: Failed to set interrupt triggers!\n",
2308 card->devname);
2309 return 0;
2310 }
2311
2312
2313 /* Mask the Transmit and Timer interrupt */
2314 flags->interrupt_info_struct.interrupt_permission &=
2315 ~(APP_INT_ON_TX_FRAME | APP_INT_ON_TIMER);
2316
2317
2318 if (chdlc_comm_enable(card) != 0) {
2319 printk(KERN_INFO "%s: Failed to enable chdlc communications!\n",
2320 card->devname);
2321 flags->interrupt_info_struct.interrupt_permission = 0;
2322 card->u.c.comm_enabled=0;
2323 chdlc_set_intr_mode(card,0);
2324 return 0;
2325 }
2326
2327 /* Initialize Rx/Tx buffer control fields */
2328 port_set_state(card, WAN_CONNECTING);
2329 return 0;
2330}
2331
2332
2333static void send_ppp_term_request(struct net_device *dev)
2334{
2335 struct sk_buff *new_skb;
2336 unsigned char *buf;
2337
2338 if ((new_skb = dev_alloc_skb(8)) != NULL) {
2339 /* copy data into new_skb */
2340
2341 		buf = skb_put(new_skb, 8);
2342 		memcpy(buf, "\xFF\x03\xC0\x21\x05\x98\x00\x07", 8); /* LCP Terminate-Request */
2343
2344 /* Decapsulate pkt and pass it up the protocol stack */
2345 new_skb->protocol = htons(ETH_P_WAN_PPP);
2346 new_skb->dev = dev;
2347 new_skb->mac.raw = new_skb->data;
2348
2349 netif_rx(new_skb);
2350 dev->last_rx = jiffies;
2351 }
2352}
2353
2354
2355MODULE_LICENSE("GPL");
2356
2357/****** End ****************************************************************/
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
new file mode 100644
index 000000000000..1e7b47704ad9
--- /dev/null
+++ b/drivers/net/wan/wanxl.c
@@ -0,0 +1,839 @@
1/*
2 * wanXL serial card driver for Linux
3 * host part
4 *
5 * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 *
11 * Status:
12 * - Only DTE (external clock) support with NRZ and NRZI encodings
13 * - wanXL100 will require minor driver modifications, no access to hw
14 */
15
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
20#include <linux/types.h>
21#include <linux/fcntl.h>
22#include <linux/string.h>
23#include <linux/errno.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/netdevice.h>
27#include <linux/hdlc.h>
28#include <linux/pci.h>
29#include <asm/io.h>
30#include <asm/delay.h>
31
32#include "wanxl.h"
33
34static const char* version = "wanXL serial card driver version: 0.48";
35
36#define PLX_CTL_RESET 0x40000000 /* adapter reset */
37
38#undef DEBUG_PKT
39#undef DEBUG_PCI
40
41/* MAILBOX #1 - PUTS COMMANDS */
42#define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
43#ifdef __LITTLE_ENDIAN
44#define MBX1_CMD_BSWAP 0x8C000001 /* little-endian Byte Swap Mode */
45#else
46#define MBX1_CMD_BSWAP 0x8C000000 /* big-endian Byte Swap Mode */
47#endif
48
49/* MAILBOX #2 - DRAM SIZE */
50#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
51
52
53typedef struct {
54 struct net_device *dev;
55 struct card_t *card;
56 spinlock_t lock; /* for wanxl_xmit */
57 int node; /* physical port #0 - 3 */
58 unsigned int clock_type;
59 int tx_in, tx_out;
60 struct sk_buff *tx_skbs[TX_BUFFERS];
61}port_t;
62
63
64typedef struct {
65 desc_t rx_descs[RX_QUEUE_LENGTH];
66 port_status_t port_status[4];
67}card_status_t;
68
69
70typedef struct card_t {
71 int n_ports; /* 1, 2 or 4 ports */
72 u8 irq;
73
74 u8 __iomem *plx; /* PLX PCI9060 virtual base address */
75 struct pci_dev *pdev; /* for pci_name(pdev) */
76 int rx_in;
77 struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
78 card_status_t *status; /* shared between host and card */
79 dma_addr_t status_address;
80 port_t ports[0]; /* 1 - 4 port_t structures follow */
81}card_t;
82
83
84
85static inline port_t* dev_to_port(struct net_device *dev)
86{
87 return (port_t *)dev_to_hdlc(dev)->priv;
88}
89
90
91static inline port_status_t* get_status(port_t *port)
92{
93 return &port->card->status->port_status[port->node];
94}
95
96
97#ifdef DEBUG_PCI
98static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
99 size_t size, int direction)
100{
101 dma_addr_t addr = pci_map_single(pdev, ptr, size, direction);
102 if (addr + size > 0x100000000LL)
103 printk(KERN_CRIT "wanXL %s: pci_map_single() returned memory"
104 " at 0x%LX!\n", pci_name(pdev),
105 (unsigned long long)addr);
106 return addr;
107}
108
109#undef pci_map_single
110#define pci_map_single pci_map_single_debug
111#endif
112
113
114/* Cable and/or personality module change interrupt service */
115static inline void wanxl_cable_intr(port_t *port)
116{
117 u32 value = get_status(port)->cable;
118 int valid = 1;
119 const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";
120
121 switch(value & 0x7) {
122 case STATUS_CABLE_V35: cable = "V.35"; break;
123 case STATUS_CABLE_X21: cable = "X.21"; break;
124 case STATUS_CABLE_V24: cable = "V.24"; break;
125 case STATUS_CABLE_EIA530: cable = "EIA530"; break;
126 case STATUS_CABLE_NONE: cable = "no"; break;
127 default: cable = "invalid";
128 }
129
130 switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
131 case STATUS_CABLE_V35: pm = "V.35"; break;
132 case STATUS_CABLE_X21: pm = "X.21"; break;
133 case STATUS_CABLE_V24: pm = "V.24"; break;
134 case STATUS_CABLE_EIA530: pm = "EIA530"; break;
135 case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
136 default: pm = "invalid personality"; valid = 0;
137 }
138
139 if (valid) {
140 if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
141 dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
142 ", DSR off";
143 dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
144 ", carrier off";
145 }
146 dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
147 }
148 printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n",
149 port->dev->name, pm, dte, cable, dsr, dcd);
150
151 hdlc_set_carrier(value & STATUS_CABLE_DCD, port->dev);
152}
153
154
155
156/* Transmit complete interrupt service */
157static inline void wanxl_tx_intr(port_t *port)
158{
159 struct net_device *dev = port->dev;
160 struct net_device_stats *stats = hdlc_stats(dev);
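	/* reclaim completed Tx descriptors until we reach one still owned by the card or empty */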
161 while (1) {
162 desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
163 struct sk_buff *skb = port->tx_skbs[port->tx_in];
164
165 switch (desc->stat) {
166 case PACKET_FULL:
167 case PACKET_EMPTY:
168 netif_wake_queue(dev);
169 return;
170
171 case PACKET_UNDERRUN:
172 stats->tx_errors++;
173 stats->tx_fifo_errors++;
174 break;
175
176 default:
177 stats->tx_packets++;
178 stats->tx_bytes += skb->len;
179 }
180 desc->stat = PACKET_EMPTY; /* Free descriptor */
181 pci_unmap_single(port->card->pdev, desc->address, skb->len,
182 PCI_DMA_TODEVICE);
183 dev_kfree_skb_irq(skb);
184 port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
185 }
186}
187
188
189
190/* Receive complete interrupt service */
191static inline void wanxl_rx_intr(card_t *card)
192{
193 desc_t *desc;
194 while (desc = &card->status->rx_descs[card->rx_in],
195 desc->stat != PACKET_EMPTY) {
196 if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
197 printk(KERN_CRIT "wanXL %s: received packet for"
198 " nonexistent port\n", pci_name(card->pdev));
199 else {
200 struct sk_buff *skb = card->rx_skbs[card->rx_in];
201 port_t *port = &card->ports[desc->stat &
202 PACKET_PORT_MASK];
203 struct net_device *dev = port->dev;
204 struct net_device_stats *stats = hdlc_stats(dev);
205
206 if (!skb)
207 stats->rx_dropped++;
208 else {
209 pci_unmap_single(card->pdev, desc->address,
210 BUFFER_LENGTH,
211 PCI_DMA_FROMDEVICE);
212 skb_put(skb, desc->length);
213
214#ifdef DEBUG_PKT
215 printk(KERN_DEBUG "%s RX(%i):", dev->name,
216 skb->len);
217 debug_frame(skb);
218#endif
219 stats->rx_packets++;
220 stats->rx_bytes += skb->len;
221 dev->last_rx = jiffies;
222 skb->protocol = hdlc_type_trans(skb, dev);
223 netif_rx(skb);
224 skb = NULL;
225 }
226
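			/* refill the descriptor: the old buffer was handed to the stack (or was never allocated) */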
227 if (!skb) {
228 skb = dev_alloc_skb(BUFFER_LENGTH);
229 desc->address = skb ?
230 pci_map_single(card->pdev, skb->data,
231 BUFFER_LENGTH,
232 PCI_DMA_FROMDEVICE) : 0;
233 card->rx_skbs[card->rx_in] = skb;
234 }
235 }
236 desc->stat = PACKET_EMPTY; /* Free descriptor */
237 card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
238 }
239}
240
241
242
243static irqreturn_t wanxl_intr(int irq, void* dev_id, struct pt_regs *regs)
244{
245 card_t *card = dev_id;
246 int i;
247 u32 stat;
248 int handled = 0;
249
250
251 while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
252 handled = 1;
253 writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
254
255 for (i = 0; i < card->n_ports; i++) {
256 if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
257 wanxl_tx_intr(&card->ports[i]);
258 if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
259 wanxl_cable_intr(&card->ports[i]);
260 }
261 if (stat & (1 << DOORBELL_FROM_CARD_RX))
262 wanxl_rx_intr(card);
263 }
264
265 return IRQ_RETVAL(handled);
266}
267
268
269
270static int wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
271{
272 port_t *port = dev_to_port(dev);
273 desc_t *desc;
274
275 spin_lock(&port->lock);
276
277 desc = &get_status(port)->tx_descs[port->tx_out];
278 if (desc->stat != PACKET_EMPTY) {
279 /* should never happen - previous xmit should stop queue */
280#ifdef DEBUG_PKT
281 printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
282#endif
283 netif_stop_queue(dev);
284 spin_unlock_irq(&port->lock);
285 return 1; /* request packet to be queued */
286 }
287
288#ifdef DEBUG_PKT
289 printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
290 debug_frame(skb);
291#endif
292
293 port->tx_skbs[port->tx_out] = skb;
294 desc->address = pci_map_single(port->card->pdev, skb->data, skb->len,
295 PCI_DMA_TODEVICE);
296 desc->length = skb->len;
297 desc->stat = PACKET_FULL;
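	/* ring the card's doorbell to announce the new Tx descriptor */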
298 writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
299 port->card->plx + PLX_DOORBELL_TO_CARD);
300 dev->trans_start = jiffies;
301
302 port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
303
304 if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
305 netif_stop_queue(dev);
306#ifdef DEBUG_PKT
307 printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
308#endif
309 }
310
311 spin_unlock(&port->lock);
312 return 0;
313}
314
315
316
317static int wanxl_attach(struct net_device *dev, unsigned short encoding,
318 unsigned short parity)
319{
320 port_t *port = dev_to_port(dev);
321
322 if (encoding != ENCODING_NRZ &&
323 encoding != ENCODING_NRZI)
324 return -EINVAL;
325
326 if (parity != PARITY_NONE &&
327 parity != PARITY_CRC32_PR1_CCITT &&
328 parity != PARITY_CRC16_PR1_CCITT &&
329 parity != PARITY_CRC32_PR0_CCITT &&
330 parity != PARITY_CRC16_PR0_CCITT)
331 return -EINVAL;
332
333 get_status(port)->encoding = encoding;
334 get_status(port)->parity = parity;
335 return 0;
336}
337
338
339
340static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
341{
342 const size_t size = sizeof(sync_serial_settings);
343 sync_serial_settings line;
344 port_t *port = dev_to_port(dev);
345
346 if (cmd != SIOCWANDEV)
347 return hdlc_ioctl(dev, ifr, cmd);
348
349 switch (ifr->ifr_settings.type) {
350 case IF_GET_IFACE:
351 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
352 if (ifr->ifr_settings.size < size) {
353 ifr->ifr_settings.size = size; /* data size wanted */
354 return -ENOBUFS;
355 }
356 line.clock_type = get_status(port)->clocking;
357 line.clock_rate = 0;
358 line.loopback = 0;
359
360 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
361 return -EFAULT;
362 return 0;
363
364 case IF_IFACE_SYNC_SERIAL:
365 if (!capable(CAP_NET_ADMIN))
366 return -EPERM;
367 if (dev->flags & IFF_UP)
368 return -EBUSY;
369
370 if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
371 size))
372 return -EFAULT;
373
374 if (line.clock_type != CLOCK_EXT &&
375 line.clock_type != CLOCK_TXFROMRX)
376 return -EINVAL; /* No such clock setting */
377
378 if (line.loopback != 0)
379 return -EINVAL;
380
381 get_status(port)->clocking = line.clock_type;
382 return 0;
383
384 default:
385 return hdlc_ioctl(dev, ifr, cmd);
386 }
387}
388
389
390
391static int wanxl_open(struct net_device *dev)
392{
393 port_t *port = dev_to_port(dev);
394 u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
395 unsigned long timeout;
396 int i;
397
398 if (get_status(port)->open) {
399 printk(KERN_ERR "%s: port already open\n", dev->name);
400 return -EIO;
401 }
402 if ((i = hdlc_open(dev)) != 0)
403 return i;
404
405 port->tx_in = port->tx_out = 0;
406 for (i = 0; i < TX_BUFFERS; i++)
407 get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
408 /* signal the card */
409 writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
410
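	/* poll for up to one second for the firmware to acknowledge the open */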
411 timeout = jiffies + HZ;
412 do
413 if (get_status(port)->open) {
414 netif_start_queue(dev);
415 return 0;
416 }
417 while (time_after(timeout, jiffies));
418
419 printk(KERN_ERR "%s: unable to open port\n", dev->name);
420 	/* ask the card to close the port, should it still be alive */
421 writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
422 return -EFAULT;
423}
424
425
426
427static int wanxl_close(struct net_device *dev)
428{
429 port_t *port = dev_to_port(dev);
430 unsigned long timeout;
431 int i;
432
433 hdlc_close(dev);
434 /* signal the card */
435 writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
436 port->card->plx + PLX_DOORBELL_TO_CARD);
437
438 timeout = jiffies + HZ;
439 do
440 if (!get_status(port)->open)
441 break;
442 while (time_after(timeout, jiffies));
443
444 if (get_status(port)->open)
445 printk(KERN_ERR "%s: unable to close port\n", dev->name);
446
447 netif_stop_queue(dev);
448
449 for (i = 0; i < TX_BUFFERS; i++) {
450 desc_t *desc = &get_status(port)->tx_descs[i];
451
452 if (desc->stat != PACKET_EMPTY) {
453 desc->stat = PACKET_EMPTY;
454 pci_unmap_single(port->card->pdev, desc->address,
455 port->tx_skbs[i]->len,
456 PCI_DMA_TODEVICE);
457 dev_kfree_skb(port->tx_skbs[i]);
458 }
459 }
460 return 0;
461}
462
463
464
465static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
466{
467 struct net_device_stats *stats = hdlc_stats(dev);
468 port_t *port = dev_to_port(dev);
469
470 stats->rx_over_errors = get_status(port)->rx_overruns;
471 stats->rx_frame_errors = get_status(port)->rx_frame_errors;
472 stats->rx_errors = stats->rx_over_errors + stats->rx_frame_errors;
473 return stats;
474}
475
476
477
478static int wanxl_puts_command(card_t *card, u32 cmd)
479{
480 unsigned long timeout = jiffies + 5 * HZ;
481
482 writel(cmd, card->plx + PLX_MAILBOX_1);
483 do {
484 if (readl(card->plx + PLX_MAILBOX_1) == 0)
485 return 0;
486
487 schedule();
488 }while (time_after(timeout, jiffies));
489
490 return -1;
491}
492
493
494
495static void wanxl_reset(card_t *card)
496{
497 u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;
498
499 writel(0x80, card->plx + PLX_MAILBOX_0);
500 writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
501 readl(card->plx + PLX_CONTROL); /* wait for posted write */
502 udelay(1);
503 writel(old_value, card->plx + PLX_CONTROL);
504 readl(card->plx + PLX_CONTROL); /* wait for posted write */
505}
506
507
508
509static void wanxl_pci_remove_one(struct pci_dev *pdev)
510{
511 card_t *card = pci_get_drvdata(pdev);
512 int i;
513
514 for (i = 0; i < card->n_ports; i++) {
515 unregister_hdlc_device(card->ports[i].dev);
516 free_netdev(card->ports[i].dev);
517 }
518
519 /* unregister and free all host resources */
520 if (card->irq)
521 free_irq(card->irq, card);
522
523 wanxl_reset(card);
524
525 for (i = 0; i < RX_QUEUE_LENGTH; i++)
526 if (card->rx_skbs[i]) {
527 pci_unmap_single(card->pdev,
528 card->status->rx_descs[i].address,
529 BUFFER_LENGTH, PCI_DMA_FROMDEVICE);
530 dev_kfree_skb(card->rx_skbs[i]);
531 }
532
533 if (card->plx)
534 iounmap(card->plx);
535
536 if (card->status)
537 pci_free_consistent(pdev, sizeof(card_status_t),
538 card->status, card->status_address);
539
540 pci_release_regions(pdev);
541 pci_disable_device(pdev);
542 pci_set_drvdata(pdev, NULL);
543 kfree(card);
544}
545
546
547#include "wanxlfw.inc"
548
549static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
550 const struct pci_device_id *ent)
551{
552 card_t *card;
553 u32 ramsize, stat;
554 unsigned long timeout;
555 u32 plx_phy; /* PLX PCI base address */
556 u32 mem_phy; /* memory PCI base addr */
557 u8 __iomem *mem; /* memory virtual base addr */
558 int i, ports, alloc_size;
559
560#ifndef MODULE
561 static int printed_version;
562 if (!printed_version) {
563 printed_version++;
564 printk(KERN_INFO "%s\n", version);
565 }
566#endif
567
568 i = pci_enable_device(pdev);
569 if (i)
570 return i;
571
572 	/* QUICC can only access the first 256 MB of host RAM directly,
573 	   but the PLX9060 DMA uses full 32-bit addressing for actual packet data transfers */
574
575 /* FIXME when PCI/DMA subsystems are fixed.
576 We set both dma_mask and consistent_dma_mask to 28 bits
577 and pray pci_alloc_consistent() will use this info. It should
578 work on most platforms */
579 if (pci_set_consistent_dma_mask(pdev, 0x0FFFFFFF) ||
580 pci_set_dma_mask(pdev, 0x0FFFFFFF)) {
581 printk(KERN_ERR "wanXL: No usable DMA configuration\n");
582 return -EIO;
583 }
584
585 i = pci_request_regions(pdev, "wanXL");
586 if (i) {
587 pci_disable_device(pdev);
588 return i;
589 }
590
591 switch (pdev->device) {
592 case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
593 case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
594 default: ports = 4;
595 }
596
597 alloc_size = sizeof(card_t) + ports * sizeof(port_t);
598 card = kmalloc(alloc_size, GFP_KERNEL);
599 if (card == NULL) {
600 printk(KERN_ERR "wanXL %s: unable to allocate memory\n",
601 pci_name(pdev));
602 pci_release_regions(pdev);
603 pci_disable_device(pdev);
604 return -ENOBUFS;
605 }
606 memset(card, 0, alloc_size);
607
608 pci_set_drvdata(pdev, card);
609 card->pdev = pdev;
610
611 card->status = pci_alloc_consistent(pdev, sizeof(card_status_t),
612 &card->status_address);
613 if (card->status == NULL) {
614 wanxl_pci_remove_one(pdev);
615 return -ENOBUFS;
616 }
617
618#ifdef DEBUG_PCI
619 printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
620 " at 0x%LX\n", pci_name(pdev),
621 (unsigned long long)card->status_address);
622#endif
623
624 /* FIXME when PCI/DMA subsystems are fixed.
625 We set both dma_mask and consistent_dma_mask back to 32 bits
626 to indicate the card can do 32-bit DMA addressing */
627 if (pci_set_consistent_dma_mask(pdev, 0xFFFFFFFF) ||
628 pci_set_dma_mask(pdev, 0xFFFFFFFF)) {
629 printk(KERN_ERR "wanXL: No usable DMA configuration\n");
630 wanxl_pci_remove_one(pdev);
631 return -EIO;
632 }
633
634 /* set up PLX mapping */
635 plx_phy = pci_resource_start(pdev, 0);
636 card->plx = ioremap_nocache(plx_phy, 0x70);
637
638#if RESET_WHILE_LOADING
639 wanxl_reset(card);
640#endif
641
642 timeout = jiffies + 20 * HZ;
643 while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
644 if (time_before(timeout, jiffies)) {
645 printk(KERN_WARNING "wanXL %s: timeout waiting for"
646 " PUTS to complete\n", pci_name(pdev));
647 wanxl_pci_remove_one(pdev);
648 return -ENODEV;
649 }
650
651 switch(stat & 0xC0) {
652 case 0x00: /* hmm - PUTS completed with non-zero code? */
653 case 0x80: /* PUTS still testing the hardware */
654 break;
655
656 default:
657 printk(KERN_WARNING "wanXL %s: PUTS test 0x%X"
658 " failed\n", pci_name(pdev), stat & 0x30);
659 wanxl_pci_remove_one(pdev);
660 return -ENODEV;
661 }
662
663 schedule();
664 }
665
666 /* get on-board memory size (PUTS detects no more than 4 MB) */
667 ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;
668
669 /* set up on-board RAM mapping */
670 mem_phy = pci_resource_start(pdev, 2);
671
672
673 /* sanity check the board's reported memory size */
674 if (ramsize < BUFFERS_ADDR +
675 (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
676 printk(KERN_WARNING "wanXL %s: no enough on-board RAM"
677 " (%u bytes detected, %u bytes required)\n",
678 pci_name(pdev), ramsize, BUFFERS_ADDR +
679 (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
680 wanxl_pci_remove_one(pdev);
681 return -ENODEV;
682 }
683
684 if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
685 printk(KERN_WARNING "wanXL %s: unable to Set Byte Swap"
686 " Mode\n", pci_name(pdev));
687 wanxl_pci_remove_one(pdev);
688 return -ENODEV;
689 }
690
691 for (i = 0; i < RX_QUEUE_LENGTH; i++) {
692 struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
693 card->rx_skbs[i] = skb;
694 if (skb)
695 card->status->rx_descs[i].address =
696 pci_map_single(card->pdev, skb->data,
697 BUFFER_LENGTH,
698 PCI_DMA_FROMDEVICE);
699 }
700
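	/* copy the firmware image into on-board RAM; htonl() stores each 32-bit word big-endian for the card's CPU */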
701 mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
702 for (i = 0; i < sizeof(firmware); i += 4)
703 writel(htonl(*(u32*)(firmware + i)), mem + PDM_OFFSET + i);
704
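	/* hand the firmware the bus addresses of the per-port status blocks and the shared card status */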
705 for (i = 0; i < ports; i++)
706 writel(card->status_address +
707 (void *)&card->status->port_status[i] -
708 (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
709 writel(card->status_address, mem + PDM_OFFSET + 20);
710 writel(PDM_OFFSET, mem);
711 iounmap(mem);
712
713 writel(0, card->plx + PLX_MAILBOX_5);
714
715 if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
716 printk(KERN_WARNING "wanXL %s: unable to Abort and Jump\n",
717 pci_name(pdev));
718 wanxl_pci_remove_one(pdev);
719 return -ENODEV;
720 }
721
722 stat = 0;
723 timeout = jiffies + 5 * HZ;
724 do {
725 if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
726 break;
727 schedule();
728 }while (time_after(timeout, jiffies));
729
730 if (!stat) {
731 printk(KERN_WARNING "wanXL %s: timeout while initializing card"
732 "firmware\n", pci_name(pdev));
733 wanxl_pci_remove_one(pdev);
734 return -ENODEV;
735 }
736
737#if DETECT_RAM
738 ramsize = stat;
739#endif
740
741 printk(KERN_INFO "wanXL %s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
742 pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
743
744 /* Allocate IRQ */
745 if (request_irq(pdev->irq, wanxl_intr, SA_SHIRQ, "wanXL", card)) {
746 printk(KERN_WARNING "wanXL %s: could not allocate IRQ%i.\n",
747 pci_name(pdev), pdev->irq);
748 wanxl_pci_remove_one(pdev);
749 return -EBUSY;
750 }
751 card->irq = pdev->irq;
752
753 for (i = 0; i < ports; i++) {
754 hdlc_device *hdlc;
755 port_t *port = &card->ports[i];
756 struct net_device *dev = alloc_hdlcdev(port);
757 if (!dev) {
758 printk(KERN_ERR "wanXL %s: unable to allocate"
759 " memory\n", pci_name(pdev));
760 wanxl_pci_remove_one(pdev);
761 return -ENOMEM;
762 }
763
764 port->dev = dev;
765 hdlc = dev_to_hdlc(dev);
766 spin_lock_init(&port->lock);
767 SET_MODULE_OWNER(dev);
768 dev->tx_queue_len = 50;
769 dev->do_ioctl = wanxl_ioctl;
770 dev->open = wanxl_open;
771 dev->stop = wanxl_close;
772 hdlc->attach = wanxl_attach;
773 hdlc->xmit = wanxl_xmit;
774 dev->get_stats = wanxl_get_stats;
775 port->card = card;
776 port->node = i;
777 get_status(port)->clocking = CLOCK_EXT;
778 if (register_hdlc_device(dev)) {
779 printk(KERN_ERR "wanXL %s: unable to register hdlc"
780 " device\n", pci_name(pdev));
781 free_netdev(dev);
782 wanxl_pci_remove_one(pdev);
783 return -ENOBUFS;
784 }
785 card->n_ports++;
786 }
787
788 printk(KERN_INFO "wanXL %s: port", pci_name(pdev));
789 for (i = 0; i < ports; i++)
790 printk("%s #%i: %s", i ? "," : "", i,
791 card->ports[i].dev->name);
792 printk("\n");
793
794 for (i = 0; i < ports; i++)
795 wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/
796
797 return 0;
798}
799
800static struct pci_device_id wanxl_pci_tbl[] __devinitdata = {
801 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
802 PCI_ANY_ID, 0, 0, 0 },
803 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
804 PCI_ANY_ID, 0, 0, 0 },
805 { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
806 PCI_ANY_ID, 0, 0, 0 },
807 { 0, }
808};
809
810
811static struct pci_driver wanxl_pci_driver = {
812 .name = "wanXL",
813 .id_table = wanxl_pci_tbl,
814 .probe = wanxl_pci_init_one,
815 .remove = wanxl_pci_remove_one,
816};
817
818
819static int __init wanxl_init_module(void)
820{
821#ifdef MODULE
822 printk(KERN_INFO "%s\n", version);
823#endif
824 return pci_module_init(&wanxl_pci_driver);
825}
826
827static void __exit wanxl_cleanup_module(void)
828{
829 pci_unregister_driver(&wanxl_pci_driver);
830}
831
832
833MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
834MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
835MODULE_LICENSE("GPL v2");
836MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);
837
838module_init(wanxl_init_module);
839module_exit(wanxl_cleanup_module);
diff --git a/drivers/net/wan/wanxl.h b/drivers/net/wan/wanxl.h
new file mode 100644
index 000000000000..3f86558f8a6b
--- /dev/null
+++ b/drivers/net/wan/wanxl.h
@@ -0,0 +1,152 @@
1/*
2 * wanXL serial card driver for Linux
3 * definitions common to host driver and card firmware
4 *
5 * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License
9 * as published by the Free Software Foundation.
10 */
11
12#define RESET_WHILE_LOADING 0
13
14/* you must rebuild the firmware if any of the following is changed */
15#define DETECT_RAM 0 /* needed for > 4MB RAM, 16 MB maximum */
16#define QUICC_MEMCPY_USES_PLX 1 /* must be used if the host has > 256 MB RAM */
17
18
19#define STATUS_CABLE_V35 2
20#define STATUS_CABLE_X21 3
21#define STATUS_CABLE_V24 4
22#define STATUS_CABLE_EIA530 5
23#define STATUS_CABLE_INVALID 6
24#define STATUS_CABLE_NONE 7
25
26#define STATUS_CABLE_DCE 0x8000
27#define STATUS_CABLE_DSR 0x0010
28#define STATUS_CABLE_DCD 0x0008
29#define STATUS_CABLE_PM_SHIFT 5
30
31#define PDM_OFFSET 0x1000
32
33#define TX_BUFFERS 10 /* per port */
34#define RX_BUFFERS 30
35#define RX_QUEUE_LENGTH 40 /* card->host queue length - per card */
36
37#define PACKET_EMPTY 0x00
38#define PACKET_FULL 0x10
39#define PACKET_SENT 0x20 /* TX only */
40#define PACKET_UNDERRUN 0x30 /* TX only */
41#define PACKET_PORT_MASK 0x03 /* RX only */
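/* On the RX queue the status word is PACKET_FULL plus the source port
   number in its two low bits (PACKET_PORT_MASK) - see the packet_full
   table in wanxlfw.S. */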
42
43/* bit numbers in PLX9060 doorbell registers */
44#define DOORBELL_FROM_CARD_TX_0 0 /* packet sent by the card */
45#define DOORBELL_FROM_CARD_TX_1 1
46#define DOORBELL_FROM_CARD_TX_2 2
47#define DOORBELL_FROM_CARD_TX_3 3
48#define DOORBELL_FROM_CARD_RX 4
49#define DOORBELL_FROM_CARD_CABLE_0 5 /* cable/PM/etc. changed */
50#define DOORBELL_FROM_CARD_CABLE_1 6
51#define DOORBELL_FROM_CARD_CABLE_2 7
52#define DOORBELL_FROM_CARD_CABLE_3 8
53
54#define DOORBELL_TO_CARD_OPEN_0 0
55#define DOORBELL_TO_CARD_OPEN_1 1
56#define DOORBELL_TO_CARD_OPEN_2 2
57#define DOORBELL_TO_CARD_OPEN_3 3
58#define DOORBELL_TO_CARD_CLOSE_0 4
59#define DOORBELL_TO_CARD_CLOSE_1 5
60#define DOORBELL_TO_CARD_CLOSE_2 6
61#define DOORBELL_TO_CARD_CLOSE_3 7
62#define DOORBELL_TO_CARD_TX_0 8 /* outbound packet queued */
63#define DOORBELL_TO_CARD_TX_1 9
64#define DOORBELL_TO_CARD_TX_2 10
65#define DOORBELL_TO_CARD_TX_3 11
66
67/* firmware-only status bits, starting from last DOORBELL_TO_CARD + 1 */
68#define TASK_SCC_0 12
69#define TASK_SCC_1 13
70#define TASK_SCC_2 14
71#define TASK_SCC_3 15
72
73#define ALIGN32(x) (((x) + 3) & 0xFFFFFFFC)
74#define BUFFER_LENGTH ALIGN32(HDLC_MAX_MRU + 4) /* 4 bytes for 32-bit CRC */
75
76/* Address of TX and RX buffers in 68360 address space */
77#define BUFFERS_ADDR 0x4000 /* 16 KB */
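/* A rough sizing note, assuming HDLC_MAX_MRU is 1600 as defined in
   <linux/hdlc.h> of this kernel: BUFFER_LENGTH = ALIGN32(1604) = 1604
   bytes, so a 4-port card needs BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS)
   * 1604 * 4 = 273024 bytes of on-board RAM - the minimum the probe
   routine checks for. */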
78
79#ifndef __ASSEMBLER__
80#define PLX_OFFSET 0
81#else
82#define PLX_OFFSET PLX + 0x80
83#endif
84
85#define PLX_MAILBOX_0 (PLX_OFFSET + 0x40)
86#define PLX_MAILBOX_1 (PLX_OFFSET + 0x44)
87#define PLX_MAILBOX_2 (PLX_OFFSET + 0x48)
88#define PLX_MAILBOX_3 (PLX_OFFSET + 0x4C)
89#define PLX_MAILBOX_4 (PLX_OFFSET + 0x50)
90#define PLX_MAILBOX_5 (PLX_OFFSET + 0x54)
91#define PLX_MAILBOX_6 (PLX_OFFSET + 0x58)
92#define PLX_MAILBOX_7 (PLX_OFFSET + 0x5C)
93#define PLX_DOORBELL_TO_CARD (PLX_OFFSET + 0x60)
94#define PLX_DOORBELL_FROM_CARD (PLX_OFFSET + 0x64)
95#define PLX_INTERRUPT_CS (PLX_OFFSET + 0x68)
96#define PLX_CONTROL (PLX_OFFSET + 0x6C)
97
98#ifdef __ASSEMBLER__
99#define PLX_DMA_0_MODE (PLX + 0x100)
100#define PLX_DMA_0_PCI (PLX + 0x104)
101#define PLX_DMA_0_LOCAL (PLX + 0x108)
102#define PLX_DMA_0_LENGTH (PLX + 0x10C)
103#define PLX_DMA_0_DESC (PLX + 0x110)
104#define PLX_DMA_1_MODE (PLX + 0x114)
105#define PLX_DMA_1_PCI (PLX + 0x118)
106#define PLX_DMA_1_LOCAL (PLX + 0x11C)
107#define PLX_DMA_1_LENGTH (PLX + 0x120)
108#define PLX_DMA_1_DESC (PLX + 0x124)
109#define PLX_DMA_CMD_STS (PLX + 0x128)
110#define PLX_DMA_ARBITR_0 (PLX + 0x12C)
111#define PLX_DMA_ARBITR_1 (PLX + 0x130)
112#endif
113
114#define DESC_LENGTH 12
115
116/* offsets from start of status_t */
117/* card to host */
118#define STATUS_OPEN 0
119#define STATUS_CABLE (STATUS_OPEN + 4)
120#define STATUS_RX_OVERRUNS (STATUS_CABLE + 4)
121#define STATUS_RX_FRAME_ERRORS (STATUS_RX_OVERRUNS + 4)
122
123/* host to card */
124#define STATUS_PARITY (STATUS_RX_FRAME_ERRORS + 4)
125#define STATUS_ENCODING (STATUS_PARITY + 4)
126#define STATUS_CLOCKING (STATUS_ENCODING + 4)
127#define STATUS_TX_DESCS (STATUS_CLOCKING + 4)
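/* These offsets must stay in step with port_status_t below - the firmware,
   which is built with __ASSEMBLER__ defined, addresses the structure
   through them. */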
128
129#ifndef __ASSEMBLER__
130
131typedef struct {
132 volatile u32 stat;
133 u32 address; /* PCI address */
134 volatile u32 length;
135}desc_t;
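/* Note: DESC_LENGTH above matches sizeof(desc_t) - three 32-bit words,
   12 bytes - which is what the firmware multiplies by when indexing
   host descriptors. */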
136
137
138typedef struct {
139// Card to host
140 volatile u32 open;
141 volatile u32 cable;
142 volatile u32 rx_overruns;
143 volatile u32 rx_frame_errors;
144
145// Host to card
146 u32 parity;
147 u32 encoding;
148 u32 clocking;
149 desc_t tx_descs[TX_BUFFERS];
150}port_status_t;
151
152#endif /* __ASSEMBLER__ */
diff --git a/drivers/net/wan/wanxlfw.S b/drivers/net/wan/wanxlfw.S
new file mode 100644
index 000000000000..73aae2bf2f1c
--- /dev/null
+++ b/drivers/net/wan/wanxlfw.S
@@ -0,0 +1,895 @@
1.psize 0
2/*
3 wanXL serial card driver for Linux
4 card firmware part
5
6 Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
7
8 This program is free software; you can redistribute it and/or modify it
9 under the terms of version 2 of the GNU General Public License
10 as published by the Free Software Foundation.
11
12
13
14
15 DPRAM BDs:
16 0x000 - 0x050 TX#0 0x050 - 0x140 RX#0
17 0x140 - 0x190 TX#1 0x190 - 0x280 RX#1
18 0x280 - 0x2D0 TX#2 0x2D0 - 0x3C0 RX#2
19 0x3C0 - 0x410 TX#3 0x410 - 0x500 RX#3
20
21
22 000 5FF 1536 Bytes Dual-Port RAM User Data / BDs
23 600 6FF 256 Bytes Dual-Port RAM User Data / BDs
24 700 7FF 256 Bytes Dual-Port RAM User Data / BDs
25 C00 CBF 192 Bytes Dual-Port RAM Parameter RAM Page 1
26 D00 DBF 192 Bytes Dual-Port RAM Parameter RAM Page 2
27 E00 EBF 192 Bytes Dual-Port RAM Parameter RAM Page 3
28 F00 FBF 192 Bytes Dual-Port RAM Parameter RAM Page 4
29
30 local interrupts level
31 NMI 7
32 PIT timer, CPM (RX/TX complete) 4
33 PCI9060 DMA and PCI doorbells 3
34 Cable - not used 1
35*/
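/*
 Each TX BD region above is TX_BUFFERS * 8 = 0x50 bytes and each RX BD
 region is RX_BUFFERS * 8 = 0xF0 bytes: every buffer descriptor is two
 longwords (flags/length followed by the buffer address).
*/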
36
37#include <linux/hdlc.h>
38#include "wanxl.h"
39
40/* memory addresses and offsets */
41
42MAX_RAM_SIZE = 16 * 1024 * 1024 // max RAM supported by hardware
43
44PCI9060_VECTOR = 0x0000006C
45CPM_IRQ_BASE = 0x40
46ERROR_VECTOR = CPM_IRQ_BASE * 4
47SCC1_VECTOR = (CPM_IRQ_BASE + 0x1E) * 4
48SCC2_VECTOR = (CPM_IRQ_BASE + 0x1D) * 4
49SCC3_VECTOR = (CPM_IRQ_BASE + 0x1C) * 4
50SCC4_VECTOR = (CPM_IRQ_BASE + 0x1B) * 4
51CPM_IRQ_LEVEL = 4
52TIMER_IRQ = 128
53TIMER_IRQ_LEVEL = 4
54PITR_CONST = 0x100 + 16 // 1 Hz timer
55
56MBAR = 0x0003FF00
57
58VALUE_WINDOW = 0x40000000
59ORDER_WINDOW = 0xC0000000
60
61PLX = 0xFFF90000
62
63CSRA = 0xFFFB0000
64CSRB = 0xFFFB0002
65CSRC = 0xFFFB0004
66CSRD = 0xFFFB0006
67STATUS_CABLE_LL = 0x2000
68STATUS_CABLE_DTR = 0x1000
69
70DPRBASE = 0xFFFC0000
71
72SCC1_BASE = DPRBASE + 0xC00
73MISC_BASE = DPRBASE + 0xCB0
74SCC2_BASE = DPRBASE + 0xD00
75SCC3_BASE = DPRBASE + 0xE00
76SCC4_BASE = DPRBASE + 0xF00
77
78// offset from SCCx_BASE
79// SCC_xBASE contain offsets from DPRBASE and must be divisible by 8
80SCC_RBASE = 0 // 16-bit RxBD base address
81SCC_TBASE = 2 // 16-bit TxBD base address
82SCC_RFCR = 4 // 8-bit Rx function code
83SCC_TFCR = 5 // 8-bit Tx function code
84SCC_MRBLR = 6 // 16-bit maximum Rx buffer length
85SCC_C_MASK = 0x34 // 32-bit CRC constant
86SCC_C_PRES = 0x38 // 32-bit CRC preset
87SCC_MFLR = 0x46 // 16-bit max Rx frame length (without flags)
88
89REGBASE = DPRBASE + 0x1000
90PICR = REGBASE + 0x026 // 16-bit periodic irq control
91PITR = REGBASE + 0x02A // 16-bit periodic irq timing
92OR1 = REGBASE + 0x064 // 32-bit RAM bank #1 options
93CICR = REGBASE + 0x540 // 32(24)-bit CP interrupt config
94CIMR = REGBASE + 0x548 // 32-bit CP interrupt mask
95CISR = REGBASE + 0x54C // 32-bit CP interrupts in-service
96PADIR = REGBASE + 0x550 // 16-bit PortA data direction bitmap
97PAPAR = REGBASE + 0x552 // 16-bit PortA pin assignment bitmap
98PAODR = REGBASE + 0x554 // 16-bit PortA open drain bitmap
99PADAT = REGBASE + 0x556 // 16-bit PortA data register
100
101PCDIR = REGBASE + 0x560 // 16-bit PortC data direction bitmap
102PCPAR = REGBASE + 0x562 // 16-bit PortC pin assignment bitmap
103PCSO = REGBASE + 0x564 // 16-bit PortC special options
104PCDAT = REGBASE + 0x566 // 16-bit PortC data register
105PCINT = REGBASE + 0x568 // 16-bit PortC interrupt control
106CR = REGBASE + 0x5C0 // 16-bit Command register
107
108SCC1_REGS = REGBASE + 0x600
109SCC2_REGS = REGBASE + 0x620
110SCC3_REGS = REGBASE + 0x640
111SCC4_REGS = REGBASE + 0x660
112SICR = REGBASE + 0x6EC // 32-bit SI clock route
113
114// offset from SCCx_REGS
115SCC_GSMR_L = 0x00 // 32 bits
116SCC_GSMR_H = 0x04 // 32 bits
117SCC_PSMR = 0x08 // 16 bits
118SCC_TODR = 0x0C // 16 bits
119SCC_DSR = 0x0E // 16 bits
120SCC_SCCE = 0x10 // 16 bits
121SCC_SCCM = 0x14 // 16 bits
122SCC_SCCS = 0x17 // 8 bits
123
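// Two copy strategies follow: with QUICC_MEMCPY_USES_PLX the PLX PCI9060
// DMA channels do the transfer (channel 0 PCI->local, channel 1 local->PCI)
// while the CPU sleeps until the done bit shows up in PLX_DMA_CMD_STS;
// otherwise the CPU copies the data itself through the VALUE_WINDOW
// PCI window.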
124#if QUICC_MEMCPY_USES_PLX
125 .macro memcpy_from_pci src, dest, len // len must be < 8 MB
126 addl #3, \len
127 andl #0xFFFFFFFC, \len // always copy n * 4 bytes
128 movel \src, PLX_DMA_0_PCI
129 movel \dest, PLX_DMA_0_LOCAL
130 movel \len, PLX_DMA_0_LENGTH
131 movel #0x0103, PLX_DMA_CMD_STS // start channel 0 transfer
132 bsr memcpy_from_pci_run
133 .endm
134
135 .macro memcpy_to_pci src, dest, len
136 addl #3, \len
137 andl #0xFFFFFFFC, \len // always copy n * 4 bytes
138 movel \src, PLX_DMA_1_LOCAL
139 movel \dest, PLX_DMA_1_PCI
140 movel \len, PLX_DMA_1_LENGTH
141 movel #0x0301, PLX_DMA_CMD_STS // start channel 1 transfer
142 bsr memcpy_to_pci_run
143 .endm
144
145#else
146
147 .macro memcpy src, dest, len // len must be < 65536 bytes
148 movel %d7, -(%sp) // src and dest must be < 256 MB
149 movel \len, %d7 // bits 0 and 1
150 lsrl #2, \len
151 andl \len, \len
152 beq 99f // only 0 - 3 bytes
153 subl #1, \len // for dbf
15498: movel (\src)+, (\dest)+
155 dbfw \len, 98b
15699: movel %d7, \len
157 btstl #1, \len
158 beq 99f
159 movew (\src)+, (\dest)+
16099: btstl #0, \len
161 beq 99f
162 moveb (\src)+, (\dest)+
16399:
164 movel (%sp)+, %d7
165 .endm
166
167 .macro memcpy_from_pci src, dest, len
168 addl #VALUE_WINDOW, \src
169 memcpy \src, \dest, \len
170 .endm
171
172 .macro memcpy_to_pci src, dest, len
173 addl #VALUE_WINDOW, \dest
174 memcpy \src, \dest, \len
175 .endm
176#endif
177
178
179 .macro wait_for_command
18099: btstl #0, CR
181 bne 99b
182 .endm
183
184
185
186
187/****************************** card initialization *******************/
188 .text
189 .global _start
190_start: bra init
191
192 .org _start + 4
193ch_status_addr: .long 0, 0, 0, 0
194rx_descs_addr: .long 0
195
196init:
197#if DETECT_RAM
198 movel OR1, %d0
199 andl #0xF00007FF, %d0 // mask AMxx bits
200 orl #0xFFFF800 & ~(MAX_RAM_SIZE - 1), %d0 // update RAM bank size
201 movel %d0, OR1
202#endif
203
204 addl #VALUE_WINDOW, rx_descs_addr // PCI addresses of shared data
205 clrl %d0 // D0 = 4 * port
206init_1: tstl ch_status_addr(%d0)
207 beq init_2
208 addl #VALUE_WINDOW, ch_status_addr(%d0)
209init_2: addl #4, %d0
210 cmpl #4 * 4, %d0
211 bne init_1
212
213 movel #pci9060_interrupt, PCI9060_VECTOR
214 movel #error_interrupt, ERROR_VECTOR
215 movel #port_interrupt_1, SCC1_VECTOR
216 movel #port_interrupt_2, SCC2_VECTOR
217 movel #port_interrupt_3, SCC3_VECTOR
218 movel #port_interrupt_4, SCC4_VECTOR
219 movel #timer_interrupt, TIMER_IRQ * 4
220
221 movel #0x78000000, CIMR // only SCCx IRQs from CPM
222 movew #(TIMER_IRQ_LEVEL << 8) + TIMER_IRQ, PICR // interrupt from PIT
223 movew #PITR_CONST, PITR
224
225 // SCC1=SCCa SCC2=SCCb SCC3=SCCc SCC4=SCCd prio=4 HP=-1 IRQ=64-79
226 movel #0xD41F40 + (CPM_IRQ_LEVEL << 13), CICR
227 movel #0x543, PLX_DMA_0_MODE // 32-bit, Ready, Burst, IRQ
228 movel #0x543, PLX_DMA_1_MODE
229 movel #0x0, PLX_DMA_0_DESC // from PCI to local
230 movel #0x8, PLX_DMA_1_DESC // from local to PCI
231 movel #0x101, PLX_DMA_CMD_STS // enable both DMA channels
232 // enable local IRQ, DMA, doorbells and PCI IRQ
233 orl #0x000F0300, PLX_INTERRUPT_CS
234
235#if DETECT_RAM
236 bsr ram_test
237#else
238 movel #1, PLX_MAILBOX_5 // non-zero value = init complete
239#endif
240 bsr check_csr
241
242 movew #0xFFFF, PAPAR // all pins are clocks/data
243 clrw PADIR // first function
244 clrw PCSO // CD and CTS always active
245
246
247/****************************** main loop *****************************/
248
249main: movel channel_stats, %d7 // D7 = doorbell + irq status
250 clrl channel_stats
251
252 tstl %d7
253 bne main_1
254 // nothing to do - wait for next event
255 stop #0x2200 // supervisor + IRQ level 2
256 movew #0x2700, %sr // disable IRQs again
257 bra main
258
259main_1: clrl %d0 // D0 = 4 * port
260 clrl %d6 // D6 = doorbell to host value
261
262main_l: btstl #DOORBELL_TO_CARD_CLOSE_0, %d7
263 beq main_op
264 bclrl #DOORBELL_TO_CARD_OPEN_0, %d7 // in case both bits are set
265 bsr close_port
266main_op:
267 btstl #DOORBELL_TO_CARD_OPEN_0, %d7
268 beq main_cl
269 bsr open_port
270main_cl:
271 btstl #DOORBELL_TO_CARD_TX_0, %d7
272 beq main_txend
273 bsr tx
274main_txend:
275 btstl #TASK_SCC_0, %d7
276 beq main_next
277 bsr tx_end
278 bsr rx
279
280main_next:
281 lsrl #1, %d7 // port status for next port
282 addl #4, %d0 // D0 = 4 * next port
283 cmpl #4 * 4, %d0
284 bne main_l
285 movel %d6, PLX_DOORBELL_FROM_CARD // signal the host
286 bra main
287
288
289/****************************** open port *****************************/
290
291open_port: // D0 = 4 * port, D6 = doorbell to host
292 movel ch_status_addr(%d0), %a0 // A0 = port status address
293 tstl STATUS_OPEN(%a0)
294 bne open_port_ret // port already open
295 movel #1, STATUS_OPEN(%a0) // confirm the port is open
296// setup BDs
297 clrl tx_in(%d0)
298 clrl tx_out(%d0)
299 clrl tx_count(%d0)
300 clrl rx_in(%d0)
301
302 movel SICR, %d1 // D1 = clock settings in SICR
303 andl clocking_mask(%d0), %d1
304 cmpl #CLOCK_TXFROMRX, STATUS_CLOCKING(%a0)
305 bne open_port_clock_ext
306 orl clocking_txfromrx(%d0), %d1
307 bra open_port_set_clock
308
309open_port_clock_ext:
310 orl clocking_ext(%d0), %d1
311open_port_set_clock:
312 movel %d1, SICR // update clock settings in SICR
313
314 orw #STATUS_CABLE_DTR, csr_output(%d0) // DTR on
315 bsr check_csr // call with disabled timer interrupt
316
317// Setup TX descriptors
318 movel first_buffer(%d0), %d1 // D1 = starting buffer address
319 movel tx_first_bd(%d0), %a1 // A1 = starting TX BD address
320 movel #TX_BUFFERS - 2, %d2 // D2 = TX_BUFFERS - 1 counter
321 movel #0x18000000, %d3 // D3 = initial TX BD flags: Int + Last
322 cmpl #PARITY_NONE, STATUS_PARITY(%a0)
323 beq open_port_tx_loop
324 bsetl #26, %d3 // TX BD flag: Transmit CRC
325open_port_tx_loop:
326 movel %d3, (%a1)+ // TX flags + length
327 movel %d1, (%a1)+ // buffer address
328 addl #BUFFER_LENGTH, %d1
329 dbfw %d2, open_port_tx_loop
330
331 bsetl #29, %d3 // TX BD flag: Wrap (last BD)
332 movel %d3, (%a1)+ // Final TX flags + length
333 movel %d1, (%a1)+ // buffer address
334
335// Setup RX descriptors // A1 = starting RX BD address
336 movel #RX_BUFFERS - 2, %d2 // D2 = RX_BUFFERS - 1 counter
337open_port_rx_loop:
338 movel #0x90000000, (%a1)+ // RX flags + length
339 movel %d1, (%a1)+ // buffer address
340 addl #BUFFER_LENGTH, %d1
341 dbfw %d2, open_port_rx_loop
342
343 movel #0xB0000000, (%a1)+ // Final RX flags + length
344 movel %d1, (%a1)+ // buffer address
345
346// Setup port parameters
347 movel scc_base_addr(%d0), %a1 // A1 = SCC_BASE address
348 movel scc_reg_addr(%d0), %a2 // A2 = SCC_REGS address
349
350 movel #0xFFFF, SCC_SCCE(%a2) // clear status bits
351 movel #0x0000, SCC_SCCM(%a2) // interrupt mask
352
353 movel tx_first_bd(%d0), %d1
354 movew %d1, SCC_TBASE(%a1) // D1 = offset of first TxBD
355 addl #TX_BUFFERS * 8, %d1
356 movew %d1, SCC_RBASE(%a1) // D1 = offset of first RxBD
357 moveb #0x8, SCC_RFCR(%a1) // Intel mode, 1000
358 moveb #0x8, SCC_TFCR(%a1)
359
360// Parity settings
361 cmpl #PARITY_CRC16_PR1_CCITT, STATUS_PARITY(%a0)
362 bne open_port_parity_1
363 clrw SCC_PSMR(%a2) // CRC16-CCITT
364 movel #0xF0B8, SCC_C_MASK(%a1)
365 movel #0xFFFF, SCC_C_PRES(%a1)
366 movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
367 movew #2, parity_bytes(%d0)
368 bra open_port_2
369
370open_port_parity_1:
371 cmpl #PARITY_CRC32_PR1_CCITT, STATUS_PARITY(%a0)
372 bne open_port_parity_2
373 movew #0x0800, SCC_PSMR(%a2) // CRC32-CCITT
374 movel #0xDEBB20E3, SCC_C_MASK(%a1)
375 movel #0xFFFFFFFF, SCC_C_PRES(%a1)
376 movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
377 movew #4, parity_bytes(%d0)
378 bra open_port_2
379
380open_port_parity_2:
381 cmpl #PARITY_CRC16_PR0_CCITT, STATUS_PARITY(%a0)
382 bne open_port_parity_3
383 clrw SCC_PSMR(%a2) // CRC16-CCITT preset 0
384 movel #0xF0B8, SCC_C_MASK(%a1)
385 clrl SCC_C_PRES(%a1)
386 movew #HDLC_MAX_MRU + 2, SCC_MFLR(%a1) // 2 bytes for CRC
387 movew #2, parity_bytes(%d0)
388 bra open_port_2
389
390open_port_parity_3:
391 cmpl #PARITY_CRC32_PR0_CCITT, STATUS_PARITY(%a0)
392 bne open_port_parity_4
393 movew #0x0800, SCC_PSMR(%a2) // CRC32-CCITT preset 0
394 movel #0xDEBB20E3, SCC_C_MASK(%a1)
395 clrl SCC_C_PRES(%a1)
396 movew #HDLC_MAX_MRU + 4, SCC_MFLR(%a1) // 4 bytes for CRC
397 movew #4, parity_bytes(%d0)
398 bra open_port_2
399
400open_port_parity_4:
401 clrw SCC_PSMR(%a2) // no parity
402 movel #0xF0B8, SCC_C_MASK(%a1)
403 movel #0xFFFF, SCC_C_PRES(%a1)
404 movew #HDLC_MAX_MRU, SCC_MFLR(%a1) // 0 bytes for CRC
405 clrw parity_bytes(%d0)
406
407open_port_2:
408 movel #0x00000003, SCC_GSMR_H(%a2) // RTSM
409 cmpl #ENCODING_NRZI, STATUS_ENCODING(%a0)
410 bne open_port_nrz
411 movel #0x10040900, SCC_GSMR_L(%a2) // NRZI: TCI Tend RECN+TENC=1
412 bra open_port_3
413
414open_port_nrz:
415 movel #0x10040000, SCC_GSMR_L(%a2) // NRZ: TCI Tend RECN+TENC=0
416open_port_3:
417 movew #BUFFER_LENGTH, SCC_MRBLR(%a1)
418 movel %d0, %d1
419 lsll #4, %d1 // D1 bits 7 and 6 = port
420 orl #1, %d1
421 movew %d1, CR // Init SCC RX and TX params
422 wait_for_command
423
424 // TCI Tend ENR ENT
425 movew #0x001F, SCC_SCCM(%a2) // TXE RXF BSY TXB RXB interrupts
426 orl #0x00000030, SCC_GSMR_L(%a2) // enable SCC
427open_port_ret:
428 rts
429
430
431/****************************** close port ****************************/
432
433close_port: // D0 = 4 * port, D6 = doorbell to host
434 movel scc_reg_addr(%d0), %a0 // A0 = SCC_REGS address
435 clrw SCC_SCCM(%a0) // no SCC interrupts
436 andl #0xFFFFFFCF, SCC_GSMR_L(%a0) // Disable ENT and ENR
437
438 andw #~STATUS_CABLE_DTR, csr_output(%d0) // DTR off
439 bsr check_csr // call with disabled timer interrupt
440
441 movel ch_status_addr(%d0), %d1
442 clrl STATUS_OPEN(%d1) // confirm the port is closed
443 rts
444
445
446/****************************** transmit packet ***********************/
447// queue packets for transmission
448tx: // D0 = 4 * port, D6 = doorbell to host
449 cmpl #TX_BUFFERS, tx_count(%d0)
450 beq tx_ret // all DB's = descs in use
451
452 movel tx_out(%d0), %d1
453 movel %d1, %d2 // D1 = D2 = tx_out BD# = desc#
454 mulul #DESC_LENGTH, %d2 // D2 = TX desc offset
455 addl ch_status_addr(%d0), %d2
456 addl #STATUS_TX_DESCS, %d2 // D2 = TX desc address
457 cmpl #PACKET_FULL, (%d2) // desc status
458 bne tx_ret
459
460// queue it
461 movel 4(%d2), %a0 // PCI address
462 lsll #3, %d1 // BD is 8-bytes long
463 addl tx_first_bd(%d0), %d1 // D1 = current tx_out BD addr
464
465 movel 4(%d1), %a1 // A1 = dest address
466 movel 8(%d2), %d2 // D2 = length
467 movew %d2, 2(%d1) // length into BD
468 memcpy_from_pci %a0, %a1, %d2
469 bsetl #31, (%d1) // CP go ahead
470
471// update tx_out and tx_count
472 movel tx_out(%d0), %d1
473 addl #1, %d1
474 cmpl #TX_BUFFERS, %d1
475 bne tx_1
476 clrl %d1
477tx_1: movel %d1, tx_out(%d0)
478
479 addl #1, tx_count(%d0)
480 bra tx
481
482tx_ret: rts
483
484
485/****************************** packet received ***********************/
486
487// Service receive buffers // D0 = 4 * port, D6 = doorbell to host
488rx: movel rx_in(%d0), %d1 // D1 = rx_in BD#
489 lsll #3, %d1 // BD is 8-bytes long
490 addl rx_first_bd(%d0), %d1 // D1 = current rx_in BD address
491 movew (%d1), %d2 // D2 = RX BD flags
492 btstl #15, %d2
493 bne rx_ret // BD still empty
494
495 btstl #1, %d2
496 bne rx_overrun
497
498 tstw parity_bytes(%d0)
499 bne rx_parity
500 bclrl #2, %d2 // do not test for CRC errors
501rx_parity:
502 andw #0x0CBC, %d2 // mask status bits
503 cmpw #0x0C00, %d2 // correct frame
504 bne rx_bad_frame
505 clrl %d3
506 movew 2(%d1), %d3
507 subw parity_bytes(%d0), %d3 // D3 = packet length
508 cmpw #HDLC_MAX_MRU, %d3
509 bgt rx_bad_frame
510
511rx_good_frame:
512 movel rx_out, %d2
513 mulul #DESC_LENGTH, %d2
514 addl rx_descs_addr, %d2 // D2 = RX desc address
515 cmpl #PACKET_EMPTY, (%d2) // desc stat
516 bne rx_overrun
517
518 movel %d3, 8(%d2)
519 movel 4(%d1), %a0 // A0 = source address
520 movel 4(%d2), %a1
521 tstl %a1
522 beq rx_ignore_data
523 memcpy_to_pci %a0, %a1, %d3
524rx_ignore_data:
525 movel packet_full(%d0), (%d2) // update desc stat
526
527// update D6 and rx_out
528 bsetl #DOORBELL_FROM_CARD_RX, %d6 // signal host that RX completed
529 movel rx_out, %d2
530 addl #1, %d2
531 cmpl #RX_QUEUE_LENGTH, %d2
532 bne rx_1
533 clrl %d2
534rx_1: movel %d2, rx_out
535
536rx_free_bd:
537 andw #0xF000, (%d1) // clear CM and error bits
538 bsetl #31, (%d1) // free BD
539// update rx_in
540 movel rx_in(%d0), %d1
541 addl #1, %d1
542 cmpl #RX_BUFFERS, %d1
543 bne rx_2
544 clrl %d1
545rx_2: movel %d1, rx_in(%d0)
546 bra rx
547
548rx_overrun:
549 movel ch_status_addr(%d0), %d2
550 addl #1, STATUS_RX_OVERRUNS(%d2)
551 bra rx_free_bd
552
553rx_bad_frame:
554 movel ch_status_addr(%d0), %d2
555 addl #1, STATUS_RX_FRAME_ERRORS(%d2)
556 bra rx_free_bd
557
558rx_ret: rts
559
560
561/****************************** packet transmitted ********************/
562
563// Service transmit buffers // D0 = 4 * port, D6 = doorbell to host
564tx_end: tstl tx_count(%d0)
565 beq tx_end_ret // TX buffers already empty
566
567 movel tx_in(%d0), %d1
568 movel %d1, %d2 // D1 = D2 = tx_in BD# = desc#
569 lsll #3, %d1 // BD is 8-bytes long
570 addl tx_first_bd(%d0), %d1 // D1 = current tx_in BD address
571 movew (%d1), %d3 // D3 = TX BD flags
572 btstl #15, %d3
573 bne tx_end_ret // BD still being transmitted
574
575// update D6, tx_in and tx_count
576 orl bell_tx(%d0), %d6 // signal host that TX desc freed
577 subl #1, tx_count(%d0)
578 movel tx_in(%d0), %d1
579 addl #1, %d1
580 cmpl #TX_BUFFERS, %d1
581 bne tx_end_1
582 clrl %d1
583tx_end_1:
584 movel %d1, tx_in(%d0)
585
586// free host's descriptor
587 mulul #DESC_LENGTH, %d2 // D2 = TX desc offset
588 addl ch_status_addr(%d0), %d2
589 addl #STATUS_TX_DESCS, %d2 // D2 = TX desc address
590 btstl #1, %d3
591 bne tx_end_underrun
592 movel #PACKET_SENT, (%d2)
593 bra tx_end
594
595tx_end_underrun:
596 movel #PACKET_UNDERRUN, (%d2)
597 bra tx_end
598
599tx_end_ret: rts
600
601
602/****************************** PLX PCI9060 DMA memcpy ****************/
603
604#if QUICC_MEMCPY_USES_PLX
605// called with interrupts disabled
606memcpy_from_pci_run:
607 movel %d0, -(%sp)
608 movew %sr, -(%sp)
609memcpy_1:
610 movel PLX_DMA_CMD_STS, %d0 // do not btst PLX register directly
611 btstl #4, %d0 // transfer done?
612 bne memcpy_end
613 stop #0x2200 // enable PCI9060 interrupts
614 movew #0x2700, %sr // disable interrupts again
615 bra memcpy_1
616
617memcpy_to_pci_run:
618 movel %d0, -(%sp)
619 movew %sr, -(%sp)
620memcpy_2:
621 movel PLX_DMA_CMD_STS, %d0 // do not btst PLX register directly
622 btstl #12, %d0 // transfer done?
623 bne memcpy_end
624 stop #0x2200 // enable PCI9060 interrupts
625 movew #0x2700, %sr // disable interrupts again
626 bra memcpy_2
627
628memcpy_end:
629 movew (%sp)+, %sr
630 movel (%sp)+, %d0
631 rts
632#endif
633
634
635
636
637
638
639/****************************** PLX PCI9060 interrupt *****************/
640
641pci9060_interrupt:
642 movel %d0, -(%sp)
643
644 movel PLX_DOORBELL_TO_CARD, %d0
645 movel %d0, PLX_DOORBELL_TO_CARD // confirm all requests
646 orl %d0, channel_stats
647
648 movel #0x0909, PLX_DMA_CMD_STS // clear DMA ch #0 and #1 interrupts
649
650 movel (%sp)+, %d0
651 rte
652
653/****************************** SCC interrupts ************************/
654
655port_interrupt_1:
656 orl #0, SCC1_REGS + SCC_SCCE; // confirm SCC events
657 orl #1 << TASK_SCC_0, channel_stats
658 movel #0x40000000, CISR
659 rte
660
661port_interrupt_2:
662 orl #0, SCC2_REGS + SCC_SCCE; // confirm SCC events
663 orl #1 << TASK_SCC_1, channel_stats
664 movel #0x20000000, CISR
665 rte
666
667port_interrupt_3:
668 orl #0, SCC3_REGS + SCC_SCCE; // confirm SCC events
669 orl #1 << TASK_SCC_2, channel_stats
670 movel #0x10000000, CISR
671 rte
672
673port_interrupt_4:
674 orl #0, SCC4_REGS + SCC_SCCE; // confirm SCC events
675 orl #1 << TASK_SCC_3, channel_stats
676 movel #0x08000000, CISR
677 rte
678
679error_interrupt:
680 rte
681
682
683/****************************** cable and PM routine ******************/
684// modified registers: none
685check_csr:
686 movel %d0, -(%sp)
687 movel %d1, -(%sp)
688 movel %d2, -(%sp)
689 movel %a0, -(%sp)
690 movel %a1, -(%sp)
691
692 clrl %d0 // D0 = 4 * port
693 movel #CSRA, %a0 // A0 = CSR address
694
695check_csr_loop:
696 movew (%a0), %d1 // D1 = CSR input bits
697 andl #0xE7, %d1 // PM and cable sense bits (no DCE bit)
698 cmpw #STATUS_CABLE_V35 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
699 bne check_csr_1
700 movew #0x0E08, %d1
701 bra check_csr_valid
702
703check_csr_1:
704 cmpw #STATUS_CABLE_X21 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
705 bne check_csr_2
706 movew #0x0408, %d1
707 bra check_csr_valid
708
709check_csr_2:
710 cmpw #STATUS_CABLE_V24 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
711 bne check_csr_3
712 movew #0x0208, %d1
713 bra check_csr_valid
714
715check_csr_3:
716 cmpw #STATUS_CABLE_EIA530 * (1 + 1 << STATUS_CABLE_PM_SHIFT), %d1
717 bne check_csr_disable
718 movew #0x0D08, %d1
719 bra check_csr_valid
720
721check_csr_disable:
722 movew #0x0008, %d1 // D1 = disable everything
723 movew #0x80E7, %d2 // D2 = input mask: ignore DSR
724 bra check_csr_write
725
726check_csr_valid: // D1 = mode and IRQ bits
727 movew csr_output(%d0), %d2
728 andw #0x3000, %d2 // D2 = requested LL and DTR bits
729 orw %d2, %d1 // D1 = all requested output bits
730 movew #0x80FF, %d2 // D2 = input mask: include DSR
731
732check_csr_write:
733 cmpw old_csr_output(%d0), %d1
734 beq check_csr_input
735 movew %d1, old_csr_output(%d0)
736 movew %d1, (%a0) // Write CSR output bits
737
738check_csr_input:
739 movew (PCDAT), %d1
740 andw dcd_mask(%d0), %d1
741 beq check_csr_dcd_on // DCD and CTS signals are negated
742 movew (%a0), %d1 // D1 = CSR input bits
743 andw #~STATUS_CABLE_DCD, %d1 // DCD off
744 bra check_csr_previous
745
746check_csr_dcd_on:
747 movew (%a0), %d1 // D1 = CSR input bits
748 orw #STATUS_CABLE_DCD, %d1 // DCD on
749check_csr_previous:
750 andw %d2, %d1 // input mask
751 movel ch_status_addr(%d0), %a1
752 cmpl STATUS_CABLE(%a1), %d1 // check for change
753 beq check_csr_next
754 movel %d1, STATUS_CABLE(%a1) // update status
755 movel bell_cable(%d0), PLX_DOORBELL_FROM_CARD // signal the host
756
757check_csr_next:
758 addl #2, %a0 // next CSR register
759 addl #4, %d0 // D0 = 4 * next port
760 cmpl #4 * 4, %d0
761 bne check_csr_loop
762
763 movel (%sp)+, %a1
764 movel (%sp)+, %a0
765 movel (%sp)+, %d2
766 movel (%sp)+, %d1
767 movel (%sp)+, %d0
768 rts
769
770
771/****************************** timer interrupt ***********************/
772
773timer_interrupt:
774 bsr check_csr
775 rte
776
777
778/****************************** RAM sizing and test *******************/
779#if DETECT_RAM
780ram_test:
781 movel #0x12345678, %d1 // D1 = test value
782 movel %d1, (128 * 1024 - 4)
783 movel #128 * 1024, %d0 // D0 = RAM size tested
784ram_test_size:
785 cmpl #MAX_RAM_SIZE, %d0
786 beq ram_test_size_found
787 movel %d0, %a0
788 addl #128 * 1024 - 4, %a0
789 cmpl (%a0), %d1
790 beq ram_test_size_check
791ram_test_next_size:
792 lsll #1, %d0
793 bra ram_test_size
794
795ram_test_size_check:
796 eorl #0xFFFFFFFF, %d1
797 movel %d1, (128 * 1024 - 4)
798 cmpl (%a0), %d1
799 bne ram_test_next_size
800
801ram_test_size_found: // D0 = RAM size
802 movel %d0, %a0 // A0 = fill ptr
803 subl #firmware_end + 4, %d0
804 lsrl #2, %d0
805 movel %d0, %d1 // D1 = DBf counter
806ram_test_fill:
807 movel %a0, -(%a0)
808 dbfw %d1, ram_test_fill
809 subl #0x10000, %d1
810 cmpl #0xFFFFFFFF, %d1
811 bne ram_test_fill
812
813ram_test_loop: // D0 = DBf counter
814 cmpl (%a0)+, %a0
815 dbnew %d0, ram_test_loop
816 bne ram_test_found_bad
817 subl #0x10000, %d0
818 cmpl #0xFFFFFFFF, %d0
819 bne ram_test_loop
820 bra ram_test_all_ok
821
822ram_test_found_bad:
823 subl #4, %a0
824ram_test_all_ok:
825 movel %a0, PLX_MAILBOX_5
826 rts
827#endif
828
829
830/****************************** constants *****************************/
831
832scc_reg_addr:
833 .long SCC1_REGS, SCC2_REGS, SCC3_REGS, SCC4_REGS
834scc_base_addr:
835 .long SCC1_BASE, SCC2_BASE, SCC3_BASE, SCC4_BASE
836
837tx_first_bd:
838 .long DPRBASE
839 .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8
840 .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
841 .long DPRBASE + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
842
843rx_first_bd:
844 .long DPRBASE + TX_BUFFERS * 8
845 .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8
846 .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 2
847 .long DPRBASE + TX_BUFFERS * 8 + (TX_BUFFERS + RX_BUFFERS) * 8 * 3
848
849first_buffer:
850 .long BUFFERS_ADDR
851 .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH
852 .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 2
853 .long BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * 3
854
855bell_tx:
856 .long 1 << DOORBELL_FROM_CARD_TX_0, 1 << DOORBELL_FROM_CARD_TX_1
857 .long 1 << DOORBELL_FROM_CARD_TX_2, 1 << DOORBELL_FROM_CARD_TX_3
858
859bell_cable:
860 .long 1 << DOORBELL_FROM_CARD_CABLE_0, 1 << DOORBELL_FROM_CARD_CABLE_1
861 .long 1 << DOORBELL_FROM_CARD_CABLE_2, 1 << DOORBELL_FROM_CARD_CABLE_3
862
863packet_full:
864 .long PACKET_FULL, PACKET_FULL + 1, PACKET_FULL + 2, PACKET_FULL + 3
865
866clocking_ext:
867 .long 0x0000002C, 0x00003E00, 0x002C0000, 0x3E000000
868clocking_txfromrx:
869 .long 0x0000002D, 0x00003F00, 0x002D0000, 0x3F000000
870clocking_mask:
871 .long 0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000
872dcd_mask:
873 .word 0x020, 0, 0x080, 0, 0x200, 0, 0x800
874
875 .ascii "wanXL firmware\n"
876 .asciz "Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>\n"
877
878
879/****************************** variables *****************************/
880
881 .align 4
882channel_stats: .long 0
883
884tx_in: .long 0, 0, 0, 0 // transmitted
885tx_out: .long 0, 0, 0, 0 // received from host for transmission
886tx_count: .long 0, 0, 0, 0 // currently in transmit queue
887
888rx_in: .long 0, 0, 0, 0 // received from port
889rx_out: .long 0 // transmitted to host
890parity_bytes: .word 0, 0, 0, 0, 0, 0, 0 // only 4 words are used
891
892csr_output: .word 0
893old_csr_output: .word 0, 0, 0, 0, 0, 0, 0
894 .align 4
895firmware_end: // must be dword-aligned
diff --git a/drivers/net/wan/wanxlfw.inc_shipped b/drivers/net/wan/wanxlfw.inc_shipped
new file mode 100644
index 000000000000..73da688f943b
--- /dev/null
+++ b/drivers/net/wan/wanxlfw.inc_shipped
@@ -0,0 +1,158 @@
1static u8 firmware[]={
20x60,0x00,0x00,0x16,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
30x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x06,0xB9,0x40,0x00,0x00,0x00,0x00,0x00,
40x10,0x14,0x42,0x80,0x4A,0xB0,0x09,0xB0,0x00,0x00,0x10,0x04,0x67,0x00,0x00,0x0E,
50x06,0xB0,0x40,0x00,0x00,0x00,0x09,0xB0,0x00,0x00,0x10,0x04,0x58,0x80,0x0C,0x80,
60x00,0x00,0x00,0x10,0x66,0x00,0xFF,0xDE,0x21,0xFC,0x00,0x00,0x16,0xBC,0x00,0x6C,
70x21,0xFC,0x00,0x00,0x17,0x5E,0x01,0x00,0x21,0xFC,0x00,0x00,0x16,0xDE,0x01,0x78,
80x21,0xFC,0x00,0x00,0x16,0xFE,0x01,0x74,0x21,0xFC,0x00,0x00,0x17,0x1E,0x01,0x70,
90x21,0xFC,0x00,0x00,0x17,0x3E,0x01,0x6C,0x21,0xFC,0x00,0x00,0x18,0x4C,0x02,0x00,
100x23,0xFC,0x78,0x00,0x00,0x00,0xFF,0xFC,0x15,0x48,0x33,0xFC,0x04,0x80,0xFF,0xFC,
110x10,0x26,0x33,0xFC,0x01,0x10,0xFF,0xFC,0x10,0x2A,0x23,0xFC,0x00,0xD4,0x9F,0x40,
120xFF,0xFC,0x15,0x40,0x23,0xFC,0x00,0x00,0x05,0x43,0xFF,0xF9,0x01,0x00,0x23,0xFC,
130x00,0x00,0x05,0x43,0xFF,0xF9,0x01,0x14,0x23,0xFC,0x00,0x00,0x00,0x00,0xFF,0xF9,
140x01,0x10,0x23,0xFC,0x00,0x00,0x00,0x08,0xFF,0xF9,0x01,0x24,0x23,0xFC,0x00,0x00,
150x01,0x01,0xFF,0xF9,0x01,0x28,0x00,0xB9,0x00,0x0F,0x03,0x00,0xFF,0xF9,0x00,0xE8,
160x23,0xFC,0x00,0x00,0x00,0x01,0xFF,0xF9,0x00,0xD4,0x61,0x00,0x06,0x74,0x33,0xFC,
170xFF,0xFF,0xFF,0xFC,0x15,0x52,0x42,0x79,0xFF,0xFC,0x15,0x50,0x42,0x79,0xFF,0xFC,
180x15,0x64,0x2E,0x3A,0x08,0x50,0x42,0xB9,0x00,0x00,0x19,0x54,0x4A,0x87,0x66,0x00,
190x00,0x0E,0x4E,0x72,0x22,0x00,0x46,0xFC,0x27,0x00,0x60,0x00,0xFF,0xE6,0x42,0x80,
200x42,0x86,0x08,0x07,0x00,0x04,0x67,0x00,0x00,0x0A,0x08,0x87,0x00,0x00,0x61,0x00,
210x02,0xA0,0x08,0x07,0x00,0x00,0x67,0x00,0x00,0x06,0x61,0x00,0x00,0x36,0x08,0x07,
220x00,0x08,0x67,0x00,0x00,0x06,0x61,0x00,0x02,0xB8,0x08,0x07,0x00,0x0C,0x67,0x00,
230x00,0x0A,0x61,0x00,0x04,0x94,0x61,0x00,0x03,0x60,0xE2,0x8F,0x58,0x80,0x0C,0x80,
240x00,0x00,0x00,0x10,0x66,0x00,0xFF,0xBC,0x23,0xC6,0xFF,0xF9,0x00,0xE4,0x60,0x00,
250xFF,0x92,0x20,0x70,0x09,0xB0,0x00,0x00,0x10,0x04,0x4A,0xA8,0x00,0x00,0x66,0x00,
260x02,0x4E,0x21,0x7C,0x00,0x00,0x00,0x01,0x00,0x00,0x42,0xB0,0x09,0xB0,0x00,0x00,
270x19,0x58,0x42,0xB0,0x09,0xB0,0x00,0x00,0x19,0x68,0x42,0xB0,0x09,0xB0,0x00,0x00,
280x19,0x78,0x42,0xB0,0x09,0xB0,0x00,0x00,0x19,0x88,0x22,0x39,0xFF,0xFC,0x16,0xEC,
290xC2,0xB0,0x09,0xB0,0x00,0x00,0x18,0xF2,0x0C,0xA8,0x00,0x00,0x00,0x04,0x00,0x18,
300x66,0x00,0x00,0x0E,0x82,0xB0,0x09,0xB0,0x00,0x00,0x18,0xE2,0x60,0x00,0x00,0x0A,
310x82,0xB0,0x09,0xB0,0x00,0x00,0x18,0xD2,0x23,0xC1,0xFF,0xFC,0x16,0xEC,0x00,0x70,
320x10,0x00,0x09,0xB0,0x00,0x00,0x19,0xAA,0x61,0x00,0x05,0x76,0x22,0x30,0x09,0xB0,
330x00,0x00,0x18,0x92,0x22,0x70,0x09,0xB0,0x00,0x00,0x18,0x72,0x74,0x08,0x26,0x3C,
340x18,0x00,0x00,0x00,0x0C,0xA8,0x00,0x00,0x00,0x01,0x00,0x10,0x67,0x00,0x00,0x06,
350x08,0xC3,0x00,0x1A,0x22,0xC3,0x22,0xC1,0x06,0x81,0x00,0x00,0x05,0xFC,0x51,0xCA,
360xFF,0xF4,0x08,0xC3,0x00,0x1D,0x22,0xC3,0x22,0xC1,0x74,0x1C,0x22,0xFC,0x90,0x00,
370x00,0x00,0x22,0xC1,0x06,0x81,0x00,0x00,0x05,0xFC,0x51,0xCA,0xFF,0xF0,0x22,0xFC,
380xB0,0x00,0x00,0x00,0x22,0xC1,0x22,0x70,0x09,0xB0,0x00,0x00,0x18,0x62,0x24,0x70,
390x09,0xB0,0x00,0x00,0x18,0x52,0x25,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x10,0x25,0x7C,
400x00,0x00,0x00,0x00,0x00,0x14,0x22,0x30,0x09,0xB0,0x00,0x00,0x18,0x72,0x33,0x41,
410x00,0x02,0x06,0x81,0x00,0x00,0x00,0x50,0x33,0x41,0x00,0x00,0x13,0x7C,0x00,0x08,
420x00,0x04,0x13,0x7C,0x00,0x08,0x00,0x05,0x0C,0xA8,0x00,0x00,0x00,0x05,0x00,0x10,
430x66,0x00,0x00,0x2A,0x42,0x6A,0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,
440x23,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x38,0x33,0x7C,0x05,0xFA,0x00,0x46,0x31,0xBC,
450x00,0x02,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,0x00,0xBC,0x0C,0xA8,0x00,0x00,
460x00,0x07,0x00,0x10,0x66,0x00,0x00,0x2C,0x35,0x7C,0x08,0x00,0x00,0x08,0x23,0x7C,
470xDE,0xBB,0x20,0xE3,0x00,0x34,0x23,0x7C,0xFF,0xFF,0xFF,0xFF,0x00,0x38,0x33,0x7C,
480x05,0xFC,0x00,0x46,0x31,0xBC,0x00,0x04,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,
490x00,0x86,0x0C,0xA8,0x00,0x00,0x00,0x04,0x00,0x10,0x66,0x00,0x00,0x26,0x42,0x6A,
500x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,0x42,0xA9,0x00,0x38,0x33,0x7C,
510x05,0xFA,0x00,0x46,0x31,0xBC,0x00,0x02,0x09,0xB0,0x00,0x00,0x19,0x9C,0x60,0x00,
520x00,0x56,0x0C,0xA8,0x00,0x00,0x00,0x06,0x00,0x10,0x66,0x00,0x00,0x28,0x35,0x7C,
530x08,0x00,0x00,0x08,0x23,0x7C,0xDE,0xBB,0x20,0xE3,0x00,0x34,0x42,0xA9,0x00,0x38,
540x33,0x7C,0x05,0xFC,0x00,0x46,0x31,0xBC,0x00,0x04,0x09,0xB0,0x00,0x00,0x19,0x9C,
550x60,0x00,0x00,0x24,0x42,0x6A,0x00,0x08,0x23,0x7C,0x00,0x00,0xF0,0xB8,0x00,0x34,
560x23,0x7C,0x00,0x00,0xFF,0xFF,0x00,0x38,0x33,0x7C,0x05,0xF8,0x00,0x46,0x42,0x70,
570x09,0xB0,0x00,0x00,0x19,0x9C,0x25,0x7C,0x00,0x00,0x00,0x03,0x00,0x04,0x0C,0xA8,
580x00,0x00,0x00,0x02,0x00,0x14,0x66,0x00,0x00,0x0E,0x25,0x7C,0x10,0x04,0x09,0x00,
590x00,0x00,0x60,0x00,0x00,0x0A,0x25,0x7C,0x10,0x04,0x00,0x00,0x00,0x00,0x33,0x7C,
600x05,0xFC,0x00,0x06,0x22,0x00,0xE9,0x89,0x00,0x81,0x00,0x00,0x00,0x01,0x33,0xC1,
610xFF,0xFC,0x15,0xC0,0x08,0x39,0x00,0x00,0xFF,0xFC,0x15,0xC0,0x66,0x00,0xFF,0xF6,
620x35,0x7C,0x00,0x1F,0x00,0x14,0x00,0xAA,0x00,0x00,0x00,0x30,0x00,0x00,0x4E,0x75,
630x20,0x70,0x09,0xB0,0x00,0x00,0x18,0x52,0x42,0x68,0x00,0x14,0x02,0xA8,0xFF,0xFF,
640xFF,0xCF,0x00,0x00,0x02,0x70,0xEF,0xFF,0x09,0xB0,0x00,0x00,0x19,0xAA,0x61,0x00,
650x03,0x70,0x22,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x42,0xB0,0x19,0x90,0x4E,0x75,
660x0C,0xB0,0x00,0x00,0x00,0x0A,0x09,0xB0,0x00,0x00,0x19,0x78,0x67,0x00,0x00,0xA8,
670x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x68,0x24,0x01,0x4C,0x3C,0x20,0x00,0x00,0x00,
680x00,0x0C,0xD4,0xB0,0x09,0xB0,0x00,0x00,0x10,0x04,0x06,0x82,0x00,0x00,0x00,0x1C,
690x0C,0xB0,0x00,0x00,0x00,0x10,0x29,0x90,0x66,0x00,0x00,0x7C,0x20,0x70,0x29,0xA0,
700x00,0x04,0xE7,0x89,0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x72,0x22,0x70,0x19,0xA0,
710x00,0x04,0x24,0x30,0x29,0xA0,0x00,0x08,0x31,0x82,0x19,0xA0,0x00,0x02,0x56,0x82,
720x02,0x82,0xFF,0xFF,0xFF,0xFC,0x23,0xC8,0xFF,0xF9,0x01,0x04,0x23,0xC9,0xFF,0xF9,
730x01,0x08,0x23,0xC2,0xFF,0xF9,0x01,0x0C,0x23,0xFC,0x00,0x00,0x01,0x03,0xFF,0xF9,
740x01,0x28,0x61,0x00,0x01,0xF6,0x08,0xF0,0x00,0x1F,0x19,0x90,0x22,0x30,0x09,0xB0,
750x00,0x00,0x19,0x68,0x52,0x81,0x0C,0x81,0x00,0x00,0x00,0x0A,0x66,0x00,0x00,0x04,
760x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,0x19,0x68,0x52,0xB0,0x09,0xB0,0x00,0x00,
770x19,0x78,0x60,0x00,0xFF,0x4C,0x4E,0x75,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x88,
780xE7,0x89,0xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x82,0x34,0x30,0x19,0x90,0x08,0x02,
790x00,0x0F,0x66,0x00,0x01,0x12,0x08,0x02,0x00,0x01,0x66,0x00,0x00,0xE6,0x4A,0x70,
800x09,0xB0,0x00,0x00,0x19,0x9C,0x66,0x00,0x00,0x06,0x08,0x82,0x00,0x02,0x02,0x42,
810x0C,0xBC,0x0C,0x42,0x0C,0x00,0x66,0x00,0x00,0xDC,0x42,0x83,0x36,0x30,0x19,0xA0,
820x00,0x02,0x96,0x70,0x09,0xB0,0x00,0x00,0x19,0x9C,0x0C,0x43,0x05,0xF8,0x6E,0x00,
830x00,0xC4,0x24,0x3A,0x04,0x84,0x4C,0x3C,0x20,0x00,0x00,0x00,0x00,0x0C,0xD4,0xBA,
840xFA,0xF4,0x0C,0xB0,0x00,0x00,0x00,0x00,0x29,0x90,0x66,0x00,0x00,0x96,0x21,0x83,
850x29,0xA0,0x00,0x08,0x20,0x70,0x19,0xA0,0x00,0x04,0x22,0x70,0x29,0xA0,0x00,0x04,
860x4A,0x89,0x67,0x00,0x00,0x2A,0x56,0x83,0x02,0x83,0xFF,0xFF,0xFF,0xFC,0x23,0xC8,
870xFF,0xF9,0x01,0x1C,0x23,0xC9,0xFF,0xF9,0x01,0x18,0x23,0xC3,0xFF,0xF9,0x01,0x20,
880x23,0xFC,0x00,0x00,0x03,0x01,0xFF,0xF9,0x01,0x28,0x61,0x00,0x01,0x2C,0x21,0xB0,
890x09,0xB0,0x00,0x00,0x18,0xC2,0x29,0x90,0x08,0xC6,0x00,0x04,0x24,0x3A,0x04,0x1A,
900x52,0x82,0x0C,0x82,0x00,0x00,0x00,0x28,0x66,0x00,0x00,0x04,0x42,0x82,0x23,0xC2,
910x00,0x00,0x19,0x98,0x02,0x70,0xF0,0x00,0x19,0x90,0x08,0xF0,0x00,0x1F,0x19,0x90,
920x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x88,0x52,0x81,0x0C,0x81,0x00,0x00,0x00,0x1E,
930x66,0x00,0x00,0x04,0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,0x19,0x88,0x60,0x00,
940xFE,0xF8,0x24,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x52,0xB0,0x29,0xA0,0x00,0x08,
950x60,0x00,0xFF,0xC2,0x24,0x30,0x09,0xB0,0x00,0x00,0x10,0x04,0x52,0xB0,0x29,0xA0,
960x00,0x0C,0x60,0x00,0xFF,0xB0,0x4E,0x75,0x4A,0xB0,0x09,0xB0,0x00,0x00,0x19,0x78,
970x67,0x00,0x00,0x86,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x58,0x24,0x01,0xE7,0x89,
980xD2,0xB0,0x09,0xB0,0x00,0x00,0x18,0x72,0x36,0x30,0x19,0x90,0x08,0x03,0x00,0x0F,
990x66,0x00,0x00,0x66,0x8C,0xB0,0x09,0xB0,0x00,0x00,0x18,0xA2,0x53,0xB0,0x09,0xB0,
1000x00,0x00,0x19,0x78,0x22,0x30,0x09,0xB0,0x00,0x00,0x19,0x58,0x52,0x81,0x0C,0x81,
1010x00,0x00,0x00,0x0A,0x66,0x00,0x00,0x04,0x42,0x81,0x21,0x81,0x09,0xB0,0x00,0x00,
1020x19,0x58,0x4C,0x3C,0x20,0x00,0x00,0x00,0x00,0x0C,0xD4,0xB0,0x09,0xB0,0x00,0x00,
1030x10,0x04,0x06,0x82,0x00,0x00,0x00,0x1C,0x08,0x03,0x00,0x01,0x66,0x00,0x00,0x0E,
1040x21,0xBC,0x00,0x00,0x00,0x20,0x29,0x90,0x60,0x00,0xFF,0x7E,0x21,0xBC,0x00,0x00,
1050x00,0x30,0x29,0x90,0x60,0x00,0xFF,0x72,0x4E,0x75,0x2F,0x00,0x40,0xE7,0x20,0x39,
1060xFF,0xF9,0x01,0x28,0x08,0x00,0x00,0x04,0x66,0x00,0x00,0x2C,0x4E,0x72,0x22,0x00,
1070x46,0xFC,0x27,0x00,0x60,0x00,0xFF,0xE8,0x2F,0x00,0x40,0xE7,0x20,0x39,0xFF,0xF9,
1080x01,0x28,0x08,0x00,0x00,0x0C,0x66,0x00,0x00,0x0E,0x4E,0x72,0x22,0x00,0x46,0xFC,
1090x27,0x00,0x60,0x00,0xFF,0xE8,0x46,0xDF,0x20,0x1F,0x4E,0x75,0x2F,0x00,0x20,0x39,
1100xFF,0xF9,0x00,0xE0,0x23,0xC0,0xFF,0xF9,0x00,0xE0,0x81,0xB9,0x00,0x00,0x19,0x54,
1110x23,0xFC,0x00,0x00,0x09,0x09,0xFF,0xF9,0x01,0x28,0x20,0x1F,0x4E,0x73,0x00,0xB9,
1120x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x10,0x00,0xB9,0x00,0x00,0x10,0x00,0x00,0x00,
1130x19,0x54,0x23,0xFC,0x40,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
1140x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x30,0x00,0xB9,0x00,0x00,0x20,0x00,0x00,0x00,
1150x19,0x54,0x23,0xFC,0x20,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
1160x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x50,0x00,0xB9,0x00,0x00,0x40,0x00,0x00,0x00,
1170x19,0x54,0x23,0xFC,0x10,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x00,0xB9,
1180x00,0x00,0x00,0x00,0xFF,0xFC,0x16,0x70,0x00,0xB9,0x00,0x00,0x80,0x00,0x00,0x00,
1190x19,0x54,0x23,0xFC,0x08,0x00,0x00,0x00,0xFF,0xFC,0x15,0x4C,0x4E,0x73,0x4E,0x73,
1200x2F,0x00,0x2F,0x01,0x2F,0x02,0x2F,0x08,0x2F,0x09,0x42,0x80,0x20,0x7C,0xFF,0xFB,
1210x00,0x00,0x32,0x10,0x02,0x81,0x00,0x00,0x00,0xE7,0x0C,0x41,0x00,0x42,0x66,0x00,
1220x00,0x0A,0x32,0x3C,0x0E,0x08,0x60,0x00,0x00,0x3E,0x0C,0x41,0x00,0x63,0x66,0x00,
1230x00,0x0A,0x32,0x3C,0x04,0x08,0x60,0x00,0x00,0x2E,0x0C,0x41,0x00,0x84,0x66,0x00,
1240x00,0x0A,0x32,0x3C,0x02,0x08,0x60,0x00,0x00,0x1E,0x0C,0x41,0x00,0xA5,0x66,0x00,
1250x00,0x0A,0x32,0x3C,0x0D,0x08,0x60,0x00,0x00,0x0E,0x32,0x3C,0x00,0x08,0x34,0x3C,
1260x80,0xE7,0x60,0x00,0x00,0x14,0x34,0x30,0x09,0xB0,0x00,0x00,0x19,0xAA,0x02,0x42,
1270x30,0x00,0x82,0x42,0x34,0x3C,0x80,0xFF,0xB2,0x70,0x09,0xB0,0x00,0x00,0x19,0xAC,
1280x67,0x00,0x00,0x0C,0x31,0x81,0x09,0xB0,0x00,0x00,0x19,0xAC,0x30,0x81,0x32,0x39,
1290xFF,0xFC,0x15,0x66,0xC2,0x70,0x09,0xB0,0x00,0x00,0x19,0x02,0x67,0x00,0x00,0x0C,
1300x32,0x10,0x02,0x41,0xFF,0xF7,0x60,0x00,0x00,0x08,0x32,0x10,0x00,0x41,0x00,0x08,
1310xC2,0x42,0x22,0x70,0x09,0xB0,0x00,0x00,0x10,0x04,0xB2,0xA9,0x00,0x04,0x67,0x00,
1320x00,0x12,0x23,0x41,0x00,0x04,0x23,0xF0,0x09,0xB0,0x00,0x00,0x18,0xB2,0xFF,0xF9,
1330x00,0xE4,0x54,0x88,0x58,0x80,0x0C,0x80,0x00,0x00,0x00,0x10,0x66,0x00,0xFF,0x34,
1340x22,0x5F,0x20,0x5F,0x24,0x1F,0x22,0x1F,0x20,0x1F,0x4E,0x75,0x61,0x00,0xFF,0x12,
1350x4E,0x73,0xFF,0xFC,0x16,0x00,0xFF,0xFC,0x16,0x20,0xFF,0xFC,0x16,0x40,0xFF,0xFC,
1360x16,0x60,0xFF,0xFC,0x0C,0x00,0xFF,0xFC,0x0D,0x00,0xFF,0xFC,0x0E,0x00,0xFF,0xFC,
1370x0F,0x00,0xFF,0xFC,0x00,0x00,0xFF,0xFC,0x01,0x40,0xFF,0xFC,0x02,0x80,0xFF,0xFC,
1380x03,0xC0,0xFF,0xFC,0x00,0x50,0xFF,0xFC,0x01,0x90,0xFF,0xFC,0x02,0xD0,0xFF,0xFC,
1390x04,0x10,0x00,0x00,0x40,0x00,0x00,0x01,0x2F,0x60,0x00,0x02,0x1E,0xC0,0x00,0x03,
1400x0E,0x20,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x02,0x00,0x00,0x00,0x04,0x00,0x00,
1410x00,0x08,0x00,0x00,0x00,0x20,0x00,0x00,0x00,0x40,0x00,0x00,0x00,0x80,0x00,0x00,
1420x01,0x00,0x00,0x00,0x00,0x10,0x00,0x00,0x00,0x11,0x00,0x00,0x00,0x12,0x00,0x00,
1430x00,0x13,0x00,0x00,0x00,0x2C,0x00,0x00,0x3E,0x00,0x00,0x2C,0x00,0x00,0x3E,0x00,
1440x00,0x00,0x00,0x00,0x00,0x2D,0x00,0x00,0x3F,0x00,0x00,0x2D,0x00,0x00,0x3F,0x00,
1450x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,0x00,0xFF,0x00,
1460x00,0x00,0x00,0x20,0x00,0x00,0x00,0x80,0x00,0x00,0x02,0x00,0x00,0x00,0x08,0x00,
1470x77,0x61,0x6E,0x58,0x4C,0x20,0x66,0x69,0x72,0x6D,0x77,0x61,0x72,0x65,0x0A,0x43,
1480x6F,0x70,0x79,0x72,0x69,0x67,0x68,0x74,0x20,0x28,0x43,0x29,0x20,0x32,0x30,0x30,
1490x33,0x20,0x4B,0x72,0x7A,0x79,0x73,0x7A,0x74,0x6F,0x66,0x20,0x48,0x61,0x6C,0x61,
1500x73,0x61,0x20,0x3C,0x6B,0x68,0x63,0x40,0x70,0x6D,0x2E,0x77,0x61,0x77,0x2E,0x70,
1510x6C,0x3E,0x0A,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1520x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1530x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1540x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1550x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1560x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1570x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
158};
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
new file mode 100644
index 000000000000..8c5cfcb55826
--- /dev/null
+++ b/drivers/net/wan/x25_asy.c
@@ -0,0 +1,844 @@
1/*
2 * Things to sort out:
3 *
4 * o tbusy handling
5 * o allow users to set the parameters
6 * o sync/async switching ?
7 *
8 * Note: This does _not_ implement CCITT X.25 asynchronous framing
9 * recommendations. It's primarily for testing purposes. If you wanted
10 * to do CCITT then in theory all you need is to nick the HDLC async
11 * checksum routines from ppp.c
12 * Changes:
13 *
14 * 2000-10-29 Henner Eisen lapb_data_indication() return status.
15 */
16
17#include <linux/module.h>
18
19#include <asm/system.h>
20#include <asm/uaccess.h>
21#include <linux/bitops.h>
22#include <linux/string.h>
23#include <linux/mm.h>
24#include <linux/interrupt.h>
25#include <linux/in.h>
26#include <linux/tty.h>
27#include <linux/errno.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/skbuff.h>
31#include <linux/if_arp.h>
32#include <linux/x25.h>
33#include <linux/lapb.h>
34#include <linux/init.h>
35#include "x25_asy.h"
36
37#include <net/x25device.h>
38
39static struct net_device **x25_asy_devs;
40static int x25_asy_maxdev = SL_NRUNIT;
41
42module_param(x25_asy_maxdev, int, 0);
43MODULE_LICENSE("GPL");
44
45static int x25_asy_esc(unsigned char *p, unsigned char *d, int len);
46static void x25_asy_unesc(struct x25_asy *sl, unsigned char c);
47static void x25_asy_setup(struct net_device *dev);
48
49/* Find a free X.25 channel, and link in this `tty' line. */
50static struct x25_asy *x25_asy_alloc(void)
51{
52 struct net_device *dev = NULL;
53 struct x25_asy *sl;
54 int i;
55
56 if (x25_asy_devs == NULL)
57 return NULL; /* Master array missing ! */
58
59 for (i = 0; i < x25_asy_maxdev; i++) {
60 dev = x25_asy_devs[i];
61
62 /* Not allocated ? */
63 if (dev == NULL)
64 break;
65
66 sl = dev->priv;
67 /* Not in use ? */
68 if (!test_and_set_bit(SLF_INUSE, &sl->flags))
69 return sl;
70 }
71
72
73 /* Sorry, too many, all slots in use */
74 if (i >= x25_asy_maxdev)
75 return NULL;
76
77 /* If no channels are available, allocate one */
78 if (!dev) {
79 char name[IFNAMSIZ];
80 sprintf(name, "x25asy%d", i);
81
82 dev = alloc_netdev(sizeof(struct x25_asy),
83 name, x25_asy_setup);
84 if (!dev)
85 return NULL;
86
87 /* Initialize channel control data */
88 sl = dev->priv;
89 dev->base_addr = i;
90
91 /* register device so that it can be ifconfig'ed */
92 if (register_netdev(dev) == 0) {
93 /* (Re-)Set the INUSE bit. Very Important! */
94 set_bit(SLF_INUSE, &sl->flags);
95 x25_asy_devs[i] = dev;
96 return sl;
97 } else {
98 printk("x25_asy_alloc() - register_netdev() failure.\n");
99 free_netdev(dev);
100 }
101 }
102 return NULL;
103}
104
105
106/* Free an X.25 channel. */
107static void x25_asy_free(struct x25_asy *sl)
108{
109 /* Free all X.25 frame buffers. */
110 if (sl->rbuff) {
111 kfree(sl->rbuff);
112 }
113 sl->rbuff = NULL;
114 if (sl->xbuff) {
115 kfree(sl->xbuff);
116 }
117 sl->xbuff = NULL;
118
119 if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) {
120 printk("%s: x25_asy_free for already free unit.\n", sl->dev->name);
121 }
122}
123
124static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
125{
126 struct x25_asy *sl = dev->priv;
127 unsigned char *xbuff, *rbuff;
128	int len = 2 * newmtu;
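	/* Buffers are sized at twice the MTU (plus a little slack below),
	   presumably because the SLIP-style byte stuffing done by
	   x25_asy_esc() can at worst double the frame length. */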
129
130 xbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
131 rbuff = (unsigned char *) kmalloc (len + 4, GFP_ATOMIC);
132
133 if (xbuff == NULL || rbuff == NULL)
134 {
135 printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n",
136 dev->name);
137 if (xbuff != NULL)
138 kfree(xbuff);
139 if (rbuff != NULL)
140 kfree(rbuff);
141 return -ENOMEM;
142 }
143
144 spin_lock_bh(&sl->lock);
145 xbuff = xchg(&sl->xbuff, xbuff);
146 if (sl->xleft) {
147 if (sl->xleft <= len) {
148 memcpy(sl->xbuff, sl->xhead, sl->xleft);
149 } else {
150 sl->xleft = 0;
151 sl->stats.tx_dropped++;
152 }
153 }
154 sl->xhead = sl->xbuff;
155
156 rbuff = xchg(&sl->rbuff, rbuff);
157 if (sl->rcount) {
158 if (sl->rcount <= len) {
159 memcpy(sl->rbuff, rbuff, sl->rcount);
160 } else {
161 sl->rcount = 0;
162 sl->stats.rx_over_errors++;
163 set_bit(SLF_ERROR, &sl->flags);
164 }
165 }
166
167 dev->mtu = newmtu;
168 sl->buffsize = len;
169
170 spin_unlock_bh(&sl->lock);
171
172 if (xbuff != NULL)
173 kfree(xbuff);
174 if (rbuff != NULL)
175 kfree(rbuff);
176 return 0;
177}
178
179
180/* Set the "sending" flag. This must be atomic. */
181
182static inline void x25_asy_lock(struct x25_asy *sl)
183{
184 netif_stop_queue(sl->dev);
185}
186
187
188/* Clear the "sending" flag. This must be atomic. */
189
190static inline void x25_asy_unlock(struct x25_asy *sl)
191{
192 netif_wake_queue(sl->dev);
193}
194
195/* Send one completely decapsulated IP datagram to the IP layer. */
196
197static void x25_asy_bump(struct x25_asy *sl)
198{
199 struct sk_buff *skb;
200 int count;
201 int err;
202
203 count = sl->rcount;
204 sl->stats.rx_bytes+=count;
205
206 skb = dev_alloc_skb(count+1);
207 if (skb == NULL)
208 {
209 printk("%s: memory squeeze, dropping packet.\n", sl->dev->name);
210 sl->stats.rx_dropped++;
211 return;
212 }
213 skb_push(skb,1); /* LAPB internal control */
214 memcpy(skb_put(skb,count), sl->rbuff, count);
215 skb->protocol = x25_type_trans(skb, sl->dev);
216 if((err=lapb_data_received(skb->dev, skb))!=LAPB_OK)
217 {
218 kfree_skb(skb);
219 printk(KERN_DEBUG "x25_asy: data received err - %d\n",err);
220 }
221 else
222 {
223 netif_rx(skb);
224 sl->dev->last_rx = jiffies;
225 sl->stats.rx_packets++;
226 }
227}
228
229/* Encapsulate one IP datagram and stuff into a TTY queue. */
230static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
231{
232 unsigned char *p;
233 int actual, count, mtu = sl->dev->mtu;
234
235 if (len > mtu)
236 { /* Sigh, shouldn't occur BUT ... */
237 len = mtu;
238 printk ("%s: truncating oversized transmit packet!\n", sl->dev->name);
239 sl->stats.tx_dropped++;
240 x25_asy_unlock(sl);
241 return;
242 }
243
244 p = icp;
245 count = x25_asy_esc(p, (unsigned char *) sl->xbuff, len);
246
247	/* The order of the next two lines is *very* important.
248	 * When we are sending a small amount of data,
249	 * the transfer may complete inside the driver.write()
250	 * routine, because it runs with interrupts enabled.
251	 * In that case we would *never* get the WRITE_WAKEUP event
252	 * if we did not request it before the write operation.
253 * 14 Oct 1994 Dmitry Gorodchanin.
254 */
255 sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
256 actual = sl->tty->driver->write(sl->tty, sl->xbuff, count);
257 sl->xleft = count - actual;
258 sl->xhead = sl->xbuff + actual;
259 /* VSV */
260 clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */
261}
262
263/*
264 * Called by the driver when there's room for more data. If we have
265 * more packets to send, we send them here.
266 */
267static void x25_asy_write_wakeup(struct tty_struct *tty)
268{
269 int actual;
270 struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
271
272 /* First make sure we're connected. */
273 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
274 return;
275
276 if (sl->xleft <= 0)
277 {
278		/* Now the serial buffer is almost free and we can start
279 * transmission of another packet */
280 sl->stats.tx_packets++;
281 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
282 x25_asy_unlock(sl);
283 return;
284 }
285
286 actual = tty->driver->write(tty, sl->xhead, sl->xleft);
287 sl->xleft -= actual;
288 sl->xhead += actual;
289}
290
291static void x25_asy_timeout(struct net_device *dev)
292{
293 struct x25_asy *sl = (struct x25_asy*)(dev->priv);
294
295 spin_lock(&sl->lock);
296 if (netif_queue_stopped(dev)) {
297		/* Maybe we should check for a transmitter timeout here?
298 * 14 Oct 1994 Dmitry Gorodchanin.
299 */
300 printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
301 (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
302 "bad line quality" : "driver error");
303 sl->xleft = 0;
304 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
305 x25_asy_unlock(sl);
306 }
307 spin_unlock(&sl->lock);
308}
309
310/* Encapsulate an IP datagram and kick it into a TTY queue. */
311
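/* The first byte of every frame exchanged with the X.25 layer is a
   pseudo-header: 0x00 means data (stripped below and handed to LAPB),
   0x01 requests a LAPB connection and 0x02 a disconnection.
   x25_asy_connected()/x25_asy_disconnected() report events back up
   using the same codes. */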
312static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
313{
314 struct x25_asy *sl = (struct x25_asy*)(dev->priv);
315 int err;
316
317 if (!netif_running(sl->dev)) {
318 printk("%s: xmit call when iface is down\n", dev->name);
319 kfree_skb(skb);
320 return 0;
321 }
322
323 switch(skb->data[0])
324 {
325 case 0x00:break;
326 case 0x01: /* Connection request .. do nothing */
327 if((err=lapb_connect_request(dev))!=LAPB_OK)
328 printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
329 kfree_skb(skb);
330 return 0;
331 case 0x02: /* Disconnect request .. do nothing - hang up ?? */
332 if((err=lapb_disconnect_request(dev))!=LAPB_OK)
333 printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
334 default:
335 kfree_skb(skb);
336 return 0;
337 }
338 skb_pull(skb,1); /* Remove control byte */
339 /*
340 * If we are busy already- too bad. We ought to be able
341 * to queue things at this point, to allow for a little
342 * frame buffer. Oh well...
343 * -----------------------------------------------------
344	 * I hate queues in the X.25 driver. Maybe it's efficient,
345 * but for me latency is more important. ;)
346 * So, no queues !
347 * 14 Oct 1994 Dmitry Gorodchanin.
348 */
349
350 if((err=lapb_data_request(dev,skb))!=LAPB_OK)
351 {
352		printk(KERN_ERR "x25_asy: lapb_data_request error - %d\n", err);
353 kfree_skb(skb);
354 return 0;
355 }
356 return 0;
357}
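/*
 * Summary of the one-byte control prefix used on frames exchanged with
 * the network layer, as handled by x25_asy_xmit() above and by
 * x25_asy_connected()/x25_asy_disconnected() below:
 *
 *	0x00	data	   - the remainder of the frame goes to LAPB via
 *			     lapb_data_request()
 *	0x01	connect	   - down: lapb_connect_request() is issued;
 *			     up: injected when the LAPB link is established
 *	0x02	disconnect - down: lapb_disconnect_request() is issued;
 *			     up: injected when the LAPB link goes down
 */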
358
359
360/*
361 * LAPB interface boilerplate
362 */
363
364/*
365 * Called when I frame data arrives. We did the work above - throw it
366 * at the net layer.
367 */
368
369static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
370{
371 skb->dev->last_rx = jiffies;
372 return netif_rx(skb);
373}
374
375/*
376 * Data has emerged from the LAPB protocol machine. We don't handle
377 * busy cases too well. It's tricky to see how to do this nicely -
378 * perhaps lapb should allow us to bounce this ?
379 */
380
381static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
382{
383 struct x25_asy *sl=dev->priv;
384
385 spin_lock(&sl->lock);
386 if (netif_queue_stopped(sl->dev) || sl->tty == NULL)
387 {
388 spin_unlock(&sl->lock);
389 printk(KERN_ERR "x25_asy: tbusy drop\n");
390 kfree_skb(skb);
391 return;
392 }
393 /* We were not busy, so we are now... :-) */
394 if (skb != NULL)
395 {
396 x25_asy_lock(sl);
397 sl->stats.tx_bytes+=skb->len;
398 x25_asy_encaps(sl, skb->data, skb->len);
399 dev_kfree_skb(skb);
400 }
401 spin_unlock(&sl->lock);
402}
403
404/*
405 * LAPB connection establish/down information.
406 */
407
408static void x25_asy_connected(struct net_device *dev, int reason)
409{
410 struct x25_asy *sl = dev->priv;
411 struct sk_buff *skb;
412 unsigned char *ptr;
413
414 if ((skb = dev_alloc_skb(1)) == NULL) {
415 printk(KERN_ERR "x25_asy: out of memory\n");
416 return;
417 }
418
419 ptr = skb_put(skb, 1);
420 *ptr = 0x01;
421
422 skb->protocol = x25_type_trans(skb, sl->dev);
423 netif_rx(skb);
424 sl->dev->last_rx = jiffies;
425}
426
427static void x25_asy_disconnected(struct net_device *dev, int reason)
428{
429 struct x25_asy *sl = dev->priv;
430 struct sk_buff *skb;
431 unsigned char *ptr;
432
433 if ((skb = dev_alloc_skb(1)) == NULL) {
434 printk(KERN_ERR "x25_asy: out of memory\n");
435 return;
436 }
437
438 ptr = skb_put(skb, 1);
439 *ptr = 0x02;
440
441 skb->protocol = x25_type_trans(skb, sl->dev);
442 netif_rx(skb);
443 sl->dev->last_rx = jiffies;
444}
445
446static struct lapb_register_struct x25_asy_callbacks = {
447 .connect_confirmation = x25_asy_connected,
448 .connect_indication = x25_asy_connected,
449 .disconnect_confirmation = x25_asy_disconnected,
450 .disconnect_indication = x25_asy_disconnected,
451 .data_indication = x25_asy_data_indication,
452 .data_transmit = x25_asy_data_transmit,
453
454};
455
456
457/* Open the low-level part of the X.25 channel. Easy! */
458static int x25_asy_open(struct net_device *dev)
459{
460 struct x25_asy *sl = (struct x25_asy*)(dev->priv);
461 unsigned long len;
462 int err;
463
464 if (sl->tty == NULL)
465 return -ENODEV;
466
467 /*
468 * Allocate the X.25 frame buffers:
469 *
470 * rbuff Receive buffer.
471 * xbuff Transmit buffer.
472 */
473
474 len = dev->mtu * 2;
475
476 sl->rbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL);
477 if (sl->rbuff == NULL) {
478 goto norbuff;
479 }
480 sl->xbuff = (unsigned char *) kmalloc(len + 4, GFP_KERNEL);
481 if (sl->xbuff == NULL) {
482 goto noxbuff;
483 }
484
485 sl->buffsize = len;
486 sl->rcount = 0;
487 sl->xleft = 0;
488 sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */
489
490 netif_start_queue(dev);
491
492 /*
493 * Now attach LAPB
494 */
495 if((err=lapb_register(dev, &x25_asy_callbacks))==LAPB_OK)
496 return 0;
497
498 /* Cleanup */
499 kfree(sl->xbuff);
500noxbuff:
501 kfree(sl->rbuff);
502norbuff:
503 return -ENOMEM;
504}
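/*
 * A note on the sizing above (an inference, not spelled out in this
 * file): len is dev->mtu * 2 plus slack presumably because x25_asy_esc()
 * can at worst double a frame (every byte escaped) and adds the opening
 * and closing X25_END flags, so a full-MTU frame still fits in xbuff
 * after byte stuffing.
 */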
505
506
507/* Close the low-level part of the X.25 channel. Easy! */
508static int x25_asy_close(struct net_device *dev)
509{
510 struct x25_asy *sl = (struct x25_asy*)(dev->priv);
511 int err;
512
513 spin_lock(&sl->lock);
514 if (sl->tty)
515 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
516
517 netif_stop_queue(dev);
518 sl->rcount = 0;
519 sl->xleft = 0;
520 if((err=lapb_unregister(dev))!=LAPB_OK)
521 printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",err);
522 spin_unlock(&sl->lock);
523 return 0;
524}
525
526static int x25_asy_receive_room(struct tty_struct *tty)
527{
528 return 65536; /* We can handle an infinite amount of data. :-) */
529}
530
531/*
532 * Handle the 'receiver data ready' interrupt.
533 * This function is called by the 'tty_io' module in the kernel when
534 * a block of X.25 data has been received, which can now be decapsulated
535 * and sent on to some IP layer for further processing.
536 */
537
538static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
539{
540 struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
541
542 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
543 return;
544
545
546 /* Read the characters out of the buffer */
547 while (count--) {
548 if (fp && *fp++) {
549 if (!test_and_set_bit(SLF_ERROR, &sl->flags)) {
550 sl->stats.rx_errors++;
551 }
552 cp++;
553 continue;
554 }
555 x25_asy_unesc(sl, *cp++);
556 }
557}
558
559/*
560 * Open the high-level part of the X.25 channel.
561 * This function is called by the TTY module when the
562 * X.25 line discipline is called for. Because we are
563 * sure the tty line exists, we only have to link it to
564 * a free X.25 channel...
565 */
566
567static int x25_asy_open_tty(struct tty_struct *tty)
568{
569 struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
570 int err;
571
572 /* First make sure we're not already connected. */
573 if (sl && sl->magic == X25_ASY_MAGIC) {
574 return -EEXIST;
575 }
576
577 /* OK. Find a free X.25 channel to use. */
578 if ((sl = x25_asy_alloc()) == NULL) {
579 return -ENFILE;
580 }
581
582 sl->tty = tty;
583 tty->disc_data = sl;
584 if (tty->driver->flush_buffer) {
585 tty->driver->flush_buffer(tty);
586 }
587 if (tty->ldisc.flush_buffer) {
588 tty->ldisc.flush_buffer(tty);
589 }
590
591 /* Restore default settings */
592 sl->dev->type = ARPHRD_X25;
593
594 /* Perform the low-level X.25 async init */
595 if ((err = x25_asy_open(sl->dev)))
596 return err;
597
598 /* Done. We have linked the TTY line to a channel. */
599 return sl->dev->base_addr;
600}
601
602
603/*
604 * Close down an X.25 channel.
605 * This means flushing out any pending queues, and then restoring the
606 * TTY line discipline to what it was before it got hooked to X.25
607 * (which usually is TTY again).
608 */
609static void x25_asy_close_tty(struct tty_struct *tty)
610{
611 struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
612
613 /* First make sure we're connected. */
614 if (!sl || sl->magic != X25_ASY_MAGIC)
615 return;
616
617 if (sl->dev->flags & IFF_UP)
618 {
619 (void) dev_close(sl->dev);
620 }
621
622 tty->disc_data = NULL;
623 sl->tty = NULL;
624 x25_asy_free(sl);
625}
626
627
628static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
629{
630 struct x25_asy *sl = (struct x25_asy*)(dev->priv);
631
632 return &sl->stats;
633}
634
635
636 /************************************************************************
637 * STANDARD X.25 ENCAPSULATION *
638 ************************************************************************/
639
640int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
641{
642 unsigned char *ptr = d;
643 unsigned char c;
644
645 /*
646 * Send an initial END character to flush out any
647 * data that may have accumulated in the receiver
648 * due to line noise.
649 */
650
651 *ptr++ = X25_END; /* Send 01111110 bit seq */
652
653 /*
654 * For each byte in the packet, send the appropriate
655 * character sequence, according to the X.25 protocol.
656 */
657
658 while (len-- > 0)
659 {
660 switch(c = *s++)
661 {
662 case X25_END:
663 *ptr++ = X25_ESC;
664 *ptr++ = X25_ESCAPE(X25_END);
665 break;
666 case X25_ESC:
667 *ptr++ = X25_ESC;
668 *ptr++ = X25_ESCAPE(X25_ESC);
669 break;
670 default:
671 *ptr++ = c;
672 break;
673 }
674 }
675 *ptr++ = X25_END;
676 return (ptr - d);
677}
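/*
 * Worked example of the stuffing above: the frame bytes 0x01 0x7E 0x7D
 * go out as
 *
 *	0x7E  0x01  0x7D 0x5E  0x7D 0x5D  0x7E
 *
 * i.e. an opening flag, the data with X25_END/X25_ESC replaced by
 * X25_ESC followed by the byte XORed with 0x20, and a closing flag.
 * x25_asy_unesc() below undoes this a byte at a time.
 */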
678
679static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
680{
681
682 switch(s)
683 {
684 case X25_END:
685 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2))
686 {
687 x25_asy_bump(sl);
688 }
689 clear_bit(SLF_ESCAPE, &sl->flags);
690 sl->rcount = 0;
691 return;
692
693 case X25_ESC:
694 set_bit(SLF_ESCAPE, &sl->flags);
695 return;
696
697 case X25_ESCAPE(X25_ESC):
698 case X25_ESCAPE(X25_END):
699 if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
700 s = X25_UNESCAPE(s);
701 break;
702 }
703 if (!test_bit(SLF_ERROR, &sl->flags))
704 {
705 if (sl->rcount < sl->buffsize)
706 {
707 sl->rbuff[sl->rcount++] = s;
708 return;
709 }
710 sl->stats.rx_over_errors++;
711 set_bit(SLF_ERROR, &sl->flags);
712 }
713}
714
715
716/* Perform I/O control on an active X.25 channel. */
717static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
718 unsigned int cmd, unsigned long arg)
719{
720 struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
721
722 /* First make sure we're connected. */
723 if (!sl || sl->magic != X25_ASY_MAGIC)
724 return -EINVAL;
725
726 switch(cmd) {
727 case SIOCGIFNAME:
728 if (copy_to_user((void __user *)arg, sl->dev->name,
729 strlen(sl->dev->name) + 1))
730 return -EFAULT;
731 return 0;
732 case SIOCSIFHWADDR:
733 return -EINVAL;
734 /* Allow stty to read, but not set, the serial port */
735 case TCGETS:
736 case TCGETA:
737 return n_tty_ioctl(tty, file, cmd, arg);
738 default:
739 return -ENOIOCTLCMD;
740 }
741}
742
743static int x25_asy_open_dev(struct net_device *dev)
744{
745 struct x25_asy *sl = (struct x25_asy*)(dev->priv);
746 if(sl->tty==NULL)
747 return -ENODEV;
748 return 0;
749}
750
751/* Initialise the X.25 driver. Called by the device init code */
752static void x25_asy_setup(struct net_device *dev)
753{
754 struct x25_asy *sl = dev->priv;
755
756 sl->magic = X25_ASY_MAGIC;
757 sl->dev = dev;
758 spin_lock_init(&sl->lock);
759 set_bit(SLF_INUSE, &sl->flags);
760
761 /*
762 * Finish setting up the DEVICE info.
763 */
764
765 dev->mtu = SL_MTU;
766 dev->hard_start_xmit = x25_asy_xmit;
767 dev->tx_timeout = x25_asy_timeout;
768 dev->watchdog_timeo = HZ*20;
769 dev->open = x25_asy_open_dev;
770 dev->stop = x25_asy_close;
771 dev->get_stats = x25_asy_get_stats;
772 dev->change_mtu = x25_asy_change_mtu;
773 dev->hard_header_len = 0;
774 dev->addr_len = 0;
775 dev->type = ARPHRD_X25;
776 dev->tx_queue_len = 10;
777
778 /* New-style flags. */
779 dev->flags = IFF_NOARP;
780}
781
782static struct tty_ldisc x25_ldisc = {
783 .owner = THIS_MODULE,
784 .magic = TTY_LDISC_MAGIC,
785 .name = "X.25",
786 .open = x25_asy_open_tty,
787 .close = x25_asy_close_tty,
788 .ioctl = x25_asy_ioctl,
789 .receive_buf = x25_asy_receive_buf,
790 .receive_room = x25_asy_receive_room,
791 .write_wakeup = x25_asy_write_wakeup,
792};
793
794static int __init init_x25_asy(void)
795{
796 if (x25_asy_maxdev < 4)
797 x25_asy_maxdev = 4; /* Sanity */
798
799 printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
800 "(dynamic channels, max=%d).\n", x25_asy_maxdev );
801
802 x25_asy_devs = kmalloc(sizeof(struct net_device *)*x25_asy_maxdev,
803 GFP_KERNEL);
804 if (!x25_asy_devs) {
805 printk(KERN_WARNING "X25 async: Can't allocate x25_asy_devs[] "
806 "array! Uaargh! (-> No X.25 available)\n");
807 return -ENOMEM;
808 }
809 memset(x25_asy_devs, 0, sizeof(struct net_device *)*x25_asy_maxdev);
810
811 return tty_register_ldisc(N_X25, &x25_ldisc);
812}
813
814
815static void __exit exit_x25_asy(void)
816{
817 struct net_device *dev;
818 int i;
819
820 for (i = 0; i < x25_asy_maxdev; i++) {
821 dev = x25_asy_devs[i];
822 if (dev) {
823 struct x25_asy *sl = dev->priv;
824
825 spin_lock_bh(&sl->lock);
826 if (sl->tty)
827 tty_hangup(sl->tty);
828
829 spin_unlock_bh(&sl->lock);
830 /*
831 * VSV: if dev->start == 0, the device was
832 * unregistered during the close procedure.
833 */
834 unregister_netdev(dev);
835 free_netdev(dev);
836 }
837 }
838
839 kfree(x25_asy_devs);
840 tty_register_ldisc(N_X25, NULL);
841}
842
843module_init(init_x25_asy);
844module_exit(exit_x25_asy);
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
new file mode 100644
index 000000000000..41770200ceb6
--- /dev/null
+++ b/drivers/net/wan/x25_asy.h
@@ -0,0 +1,50 @@
1#ifndef _LINUX_X25_ASY_H
2#define _LINUX_X25_ASY_H
3
4/* X.25 asy configuration. */
5#define SL_NRUNIT 256 /* MAX number of X.25 channels;
6 This can be overridden with
7 insmod -ox25_asy_maxdev=nnn */
8#define SL_MTU 256
9
10/* X25 async protocol characters. */
11#define X25_END 0x7E /* indicates end of frame */
12#define X25_ESC 0x7D /* indicates byte stuffing */
13#define X25_ESCAPE(x) ((x)^0x20)
14#define X25_UNESCAPE(x) ((x)^0x20)
15
16
17struct x25_asy {
18 int magic;
19
20 /* Various fields. */
21 spinlock_t lock;
22 struct tty_struct *tty; /* ptr to TTY structure */
23 struct net_device *dev; /* easy for intr handling */
24
25 /* These are pointers to the malloc()ed frame buffers. */
26 unsigned char *rbuff; /* receiver buffer */
27 int rcount; /* received chars counter */
28 unsigned char *xbuff; /* transmitter buffer */
29 unsigned char *xhead; /* pointer to next byte to XMIT */
30 int xleft; /* bytes left in XMIT queue */
31
32 /* X.25 interface statistics. */
33 struct net_device_stats stats;
34
35 int buffsize; /* Max buffers sizes */
36
37 unsigned long flags; /* Flag values/ mode etc */
38#define SLF_INUSE 0 /* Channel in use */
39#define SLF_ESCAPE 1 /* ESC received */
40#define SLF_ERROR 2 /* Parity, etc. error */
41#define SLF_OUTWAIT 4 /* Waiting for output */
42};
43
44
45
46#define X25_ASY_MAGIC 0x5303
47
48extern int x25_asy_init(struct net_device *dev);
49
50#endif /* _LINUX_X25_ASY_H */
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
new file mode 100644
index 000000000000..caa48f12fd0f
--- /dev/null
+++ b/drivers/net/wan/z85230.c
@@ -0,0 +1,1851 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
8 * (c) Copyright 2000, 2001 Red Hat Inc
9 *
10 * Development of this driver was funded by Equiinet Ltd
11 * http://www.equiinet.com
12 *
13 * ChangeLog:
14 *
15 * Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
16 * unification of all the Z85x30 asynchronous drivers for real.
17 *
18 * DMA now uses get_free_page as kmalloc buffers may span a 64K
19 * boundary.
20 *
21 * Modified for SMP safety and SMP locking by Alan Cox <alan@redhat.com>
22 *
23 * Performance
24 *
25 * Z85230:
26 * Non DMA you want a 486DX50 or better to do 64Kbits. 9600 baud
27 * X.25 is not unrealistic on all machines. DMA mode can in theory
28 * handle T1/E1 quite nicely. In practice the limit seems to be about
29 * 512Kbit->1Mbit depending on motherboard.
30 *
31 * Z85C30:
32 * 64K will take DMA, 9600 baud X.25 should be ok.
33 *
34 * Z8530:
35 * Synchronous mode without DMA is unlikely to pass about 2400 baud.
36 */
37
38#include <linux/module.h>
39#include <linux/kernel.h>
40#include <linux/mm.h>
41#include <linux/net.h>
42#include <linux/skbuff.h>
43#include <linux/netdevice.h>
44#include <linux/if_arp.h>
45#include <linux/delay.h>
46#include <linux/ioport.h>
47#include <linux/init.h>
48#include <asm/dma.h>
49#include <asm/io.h>
50#define RT_LOCK
51#define RT_UNLOCK
52#include <linux/spinlock.h>
53
54#include <net/syncppp.h>
55#include "z85230.h"
56
57
58/**
59 * z8530_read_port - Architecture specific interface function
60 * @p: port to read
61 *
62 * Provided port access methods. The Comtrol SV11 requires no delays
63 * between accesses and uses PC I/O. Some drivers may need a 5uS delay
64 *
65 * In the longer term this should become an architecture specific
66 * section so that this can become a generic driver interface for all
67 * platforms. For now we only handle PC I/O ports with or without the
68 * dread 5uS sanity delay.
69 *
70 * The caller must hold sufficient locks to avoid violating the horrible
71 * 5uS delay rule.
72 */
73
74static inline int z8530_read_port(unsigned long p)
75{
76 u8 r=inb(Z8530_PORT_OF(p));
77 if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
78 udelay(5);
79 return r;
80}
81
82/**
83 * z8530_write_port - Architecture specific interface function
84 * @p: port to write
85 * @d: value to write
86 *
87 * Write a value to a port with delays if need be. Note that the
88 * caller must hold locks to avoid read/writes from other contexts
89 * violating the 5uS rule
90 *
91 * In the longer term this should become an architecture specific
92 * section so that this can become a generic driver interface for all
93 * platforms. For now we only handle PC I/O ports with or without the
94 * dread 5uS sanity delay.
95 */
96
97
98static inline void z8530_write_port(unsigned long p, u8 d)
99{
100 outb(d,Z8530_PORT_OF(p));
101 if(p&Z8530_PORT_SLEEP)
102 udelay(5);
103}
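/*
 * Illustrative note: the "port" value handed to these helpers carries
 * both the I/O address (recovered with Z8530_PORT_OF()) and, optionally,
 * the Z8530_PORT_SLEEP flag asking for the 5uS recovery delay. A board
 * that needs the delay would store something like
 *
 *	dev->chanA.ctrlio = iobase | Z8530_PORT_SLEEP;
 *
 * (iobase here is a hypothetical board-specific port number), while a
 * board that does not simply stores the raw port.
 */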
104
105
106
107static void z8530_rx_done(struct z8530_channel *c);
108static void z8530_tx_done(struct z8530_channel *c);
109
110
111/**
112 * read_zsreg - Read a register from a Z85230
113 * @c: Z8530 channel to read from (2 per chip)
114 * @reg: Register to read
115 * FIXME: Use a spinlock.
116 *
117 * Most of the Z8530 registers are indexed off the control registers.
118 * A read is done by writing to the control register and reading the
119 * register back. The caller must hold the lock
120 */
121
122static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
123{
124 if(reg)
125 z8530_write_port(c->ctrlio, reg);
126 return z8530_read_port(c->ctrlio);
127}
128
129/**
130 * read_zsdata - Read the data port of a Z8530 channel
131 * @c: The Z8530 channel to read the data port from
132 *
133 * The data port provides fast access to some things. We still
134 * have all the 5uS delays to worry about.
135 */
136
137static inline u8 read_zsdata(struct z8530_channel *c)
138{
139 u8 r;
140 r=z8530_read_port(c->dataio);
141 return r;
142}
143
144/**
145 * write_zsreg - Write to a Z8530 channel register
146 * @c: The Z8530 channel
147 * @reg: Register number
148 * @val: Value to write
149 *
150 * Write a value to an indexed register. The caller must hold the lock
151 * to honour the irritating delay rules. We know about register 0
152 * being fast to access.
153 *
154 * Assumes c->lock is held.
155 */
156static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
157{
158 if(reg)
159 z8530_write_port(c->ctrlio, reg);
160 z8530_write_port(c->ctrlio, val);
161
162}
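/*
 * Worked example: because the register file is indexed through the
 * control port, a call such as
 *
 *	write_zsreg(c, R5, c->regs[R5] | TxENAB);
 *
 * performs two writes to c->ctrlio - first the index 5, then the value.
 * Register 0 is reached directly, which is why the "if(reg)" test above
 * skips the index write for it.
 */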
163
164/**
165 * write_zsctrl - Write to a Z8530 control register
166 * @c: The Z8530 channel
167 * @val: Value to write
168 *
169 * Write directly to the control register on the Z8530
170 */
171
172static inline void write_zsctrl(struct z8530_channel *c, u8 val)
173{
174 z8530_write_port(c->ctrlio, val);
175}
176
177/**
178 * write_zsdata - Write to a Z8530 data register
179 * @c: The Z8530 channel
180 * @val: Value to write
181 *
182 * Write directly to the data register on the Z8530
183 */
184
185
186static inline void write_zsdata(struct z8530_channel *c, u8 val)
187{
188 z8530_write_port(c->dataio, val);
189}
190
191/*
192 * Register loading parameters for a dead port
193 */
194
195u8 z8530_dead_port[]=
196{
197 255
198};
199
200EXPORT_SYMBOL(z8530_dead_port);
201
202/*
203 * Register loading parameters for currently supported circuit types
204 */
205
206
207/*
208 * Data clocked by telco end. This is the correct data for the UK
209 * "kilostream" service, and most other similar services.
210 */
211
212u8 z8530_hdlc_kilostream[]=
213{
214 4, SYNC_ENAB|SDLC|X1CLK,
215 2, 0, /* No vector */
216 1, 0,
217 3, ENT_HM|RxCRC_ENAB|Rx8,
218 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
219 9, 0, /* Disable interrupts */
220 6, 0xFF,
221 7, FLAG,
222 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
223 11, TCTRxCP,
224 14, DISDPLL,
225 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
226 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
227 9, NV|MIE|NORESET,
228 255
229};
230
231EXPORT_SYMBOL(z8530_hdlc_kilostream);
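/*
 * These tables are (register, value) pairs terminated by 255 and are
 * consumed by z8530_channel_load() below; a board driver would load one
 * with something like
 *
 *	z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
 *
 * once z8530_init() has identified the chip.
 */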
232
233/*
234 * As above but for enhanced chips.
235 */
236
237u8 z8530_hdlc_kilostream_85230[]=
238{
239 4, SYNC_ENAB|SDLC|X1CLK,
240 2, 0, /* No vector */
241 1, 0,
242 3, ENT_HM|RxCRC_ENAB|Rx8,
243 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
244 9, 0, /* Disable interrupts */
245 6, 0xFF,
246 7, FLAG,
247 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
248 11, TCTRxCP,
249 14, DISDPLL,
250 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
251 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
252 9, NV|MIE|NORESET,
253 23, 3, /* Extended mode AUTO TX and EOM*/
254
255 255
256};
257
258EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
259
260/**
261 * z8530_flush_fifo - Flush on chip RX FIFO
262 * @c: Channel to flush
263 *
264 * Flush the receive FIFO. There is no specific option for this, we
265 * blindly read bytes and discard them. Reading when there is no data
266 * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
267 *
268 * All locking is handled for the caller. On return data may still be
269 * present if it arrived during the flush.
270 */
271
272static void z8530_flush_fifo(struct z8530_channel *c)
273{
274 read_zsreg(c, R1);
275 read_zsreg(c, R1);
276 read_zsreg(c, R1);
277 read_zsreg(c, R1);
278 if(c->dev->type==Z85230)
279 {
280 read_zsreg(c, R1);
281 read_zsreg(c, R1);
282 read_zsreg(c, R1);
283 read_zsreg(c, R1);
284 }
285}
286
287/**
288 * z8530_rtsdtr - Control the outgoing DTR/RTS lines
289 * @c: The Z8530 channel to control
290 * @set: 1 to set, 0 to clear
291 *
292 * Sets or clears DTR/RTS on the requested line. All locking is handled
293 * by the caller. For now we assume all boards use the actual RTS/DTR
294 * on the chip. Apparently one or two don't. We'll scream about them
295 * later.
296 */
297
298static void z8530_rtsdtr(struct z8530_channel *c, int set)
299{
300 if (set)
301 c->regs[5] |= (RTS | DTR);
302 else
303 c->regs[5] &= ~(RTS | DTR);
304 write_zsreg(c, R5, c->regs[5]);
305}
306
307/**
308 * z8530_rx - Handle a PIO receive event
309 * @c: Z8530 channel to process
310 *
311 * Receive handler for receiving in PIO mode. This is much like the
312 * async one but not quite the same or as complex
313 *
314 * Note: It's intended that this handler can easily be separated from
315 * the main code to run realtime. That'll be needed for some machines
316 * (eg to ever clock 64kbits on a sparc ;)).
317 *
318 * The RT_LOCK macros don't do anything now. Keep the code covered
319 * by them as short as possible in all circumstances - clocks cost
320 * baud. The interrupt handler is assumed to be atomic w.r.t.
321 * other code - this is true in the RT case too.
322 *
323 * We only cover the sync cases for this. If you want 2Mbit async
324 * do it yourself but consider medical assistance first. This non DMA
325 * synchronous mode is portable code. The DMA mode assumes PC-like
326 * ISA DMA
327 *
328 * Called with the device lock held
329 */
330
331static void z8530_rx(struct z8530_channel *c)
332{
333 u8 ch,stat;
334 spin_lock(c->lock);
335
336 while(1)
337 {
338 /* FIFO empty ? */
339 if(!(read_zsreg(c, R0)&1))
340 break;
341 ch=read_zsdata(c);
342 stat=read_zsreg(c, R1);
343
344 /*
345 * Overrun ?
346 */
347 if(c->count < c->max)
348 {
349 *c->dptr++=ch;
350 c->count++;
351 }
352
353 if(stat&END_FR)
354 {
355
356 /*
357 * Error ?
358 */
359 if(stat&(Rx_OVR|CRC_ERR))
360 {
361 /* Rewind the buffer and return */
362 if(c->skb)
363 c->dptr=c->skb->data;
364 c->count=0;
365 if(stat&Rx_OVR)
366 {
367 printk(KERN_WARNING "%s: overrun\n", c->dev->name);
368 c->rx_overrun++;
369 }
370 if(stat&CRC_ERR)
371 {
372 c->rx_crc_err++;
373 /* printk("crc error\n"); */
374 }
375 /* Shove the frame upstream */
376 }
377 else
378 {
379 /*
380 * Drop the lock for RX processing, or
381 * there are deadlocks
382 */
383 z8530_rx_done(c);
384 write_zsctrl(c, RES_Rx_CRC);
385 }
386 }
387 }
388 /*
389 * Clear irq
390 */
391 write_zsctrl(c, ERR_RES);
392 write_zsctrl(c, RES_H_IUS);
393 spin_unlock(c->lock);
394}
395
396
397/**
398 * z8530_tx - Handle a PIO transmit event
399 * @c: Z8530 channel to process
400 *
401 * Z8530 transmit interrupt handler for the PIO mode. The basic
402 * idea is to attempt to keep the FIFO fed. We fill as many bytes
403 * in as possible, it's quite possible that we won't keep up with the
404 * data rate otherwise.
405 */
406
407static void z8530_tx(struct z8530_channel *c)
408{
409 spin_lock(c->lock);
410 while(c->txcount) {
411 /* FIFO full ? */
412 if(!(read_zsreg(c, R0)&4))
413 break;
414 c->txcount--;
415 /*
416 * Shovel out the byte
417 */
418 write_zsreg(c, R8, *c->tx_ptr++);
419 write_zsctrl(c, RES_H_IUS);
420 /* We are about to underflow */
421 if(c->txcount==0)
422 {
423 write_zsctrl(c, RES_EOM_L);
424 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
425 }
426 }
427
428
429 /*
430 * End of frame TX - fire another one
431 */
432
433 write_zsctrl(c, RES_Tx_P);
434
435 z8530_tx_done(c);
436 write_zsctrl(c, RES_H_IUS);
437 spin_unlock(c->lock);
438}
439
440/**
441 * z8530_status - Handle a PIO status exception
442 * @chan: Z8530 channel to process
443 *
444 * A status event occurred in PIO synchronous mode. There are several
445 * reasons the chip will bother us here. A transmit underrun means we
446 * failed to feed the chip fast enough and just broke a packet. A DCD
447 * change is a line up or down. We communicate that back to the protocol
448 * layer for synchronous PPP to renegotiate.
449 */
450
451static void z8530_status(struct z8530_channel *chan)
452{
453 u8 status, altered;
454
455 spin_lock(chan->lock);
456 status=read_zsreg(chan, R0);
457 altered=chan->status^status;
458
459 chan->status=status;
460
461 if(status&TxEOM)
462 {
463/* printk("%s: Tx underrun.\n", chan->dev->name); */
464 chan->stats.tx_fifo_errors++;
465 write_zsctrl(chan, ERR_RES);
466 z8530_tx_done(chan);
467 }
468
469 if(altered&chan->dcdcheck)
470 {
471 if(status&chan->dcdcheck)
472 {
473 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
474 write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
475 if(chan->netdevice &&
476 ((chan->netdevice->type == ARPHRD_HDLC) ||
477 (chan->netdevice->type == ARPHRD_PPP)))
478 sppp_reopen(chan->netdevice);
479 }
480 else
481 {
482 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
483 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
484 z8530_flush_fifo(chan);
485 }
486
487 }
488 write_zsctrl(chan, RES_EXT_INT);
489 write_zsctrl(chan, RES_H_IUS);
490 spin_unlock(chan->lock);
491}
492
493struct z8530_irqhandler z8530_sync=
494{
495 z8530_rx,
496 z8530_tx,
497 z8530_status
498};
499
500EXPORT_SYMBOL(z8530_sync);
501
502/**
503 * z8530_dma_rx - Handle a DMA RX event
504 * @chan: Channel to handle
505 *
506 * Non bus mastering DMA interfaces for the Z8x30 devices. This
507 * is really pretty PC specific. The DMA mode means that most receive
508 * events are handled by the DMA hardware. We get a kick here only if
509 * a frame ended.
510 */
511
512static void z8530_dma_rx(struct z8530_channel *chan)
513{
514 spin_lock(chan->lock);
515 if(chan->rxdma_on)
516 {
517 /* Special condition check only */
518 u8 status;
519
520 read_zsreg(chan, R7);
521 read_zsreg(chan, R6);
522
523 status=read_zsreg(chan, R1);
524
525 if(status&END_FR)
526 {
527 z8530_rx_done(chan); /* Fire up the next one */
528 }
529 write_zsctrl(chan, ERR_RES);
530 write_zsctrl(chan, RES_H_IUS);
531 }
532 else
533 {
534 /* DMA is off right now, drain the slow way */
535 z8530_rx(chan);
536 }
537 spin_unlock(chan->lock);
538}
539
540/**
541 * z8530_dma_tx - Handle a DMA TX event
542 * @chan: The Z8530 channel to handle
543 *
544 * We have received an interrupt while doing DMA transmissions. It
545 * shouldn't happen. Scream loudly if it does.
546 */
547
548static void z8530_dma_tx(struct z8530_channel *chan)
549{
550 spin_lock(chan->lock);
551 if(!chan->dma_tx)
552 {
553 printk(KERN_WARNING "Hey who turned the DMA off?\n");
554 z8530_tx(chan);
555 return;
556 }
557 /* This shouldn't occur in DMA mode */
558 printk(KERN_ERR "DMA tx - bogus event!\n");
559 z8530_tx(chan);
560 spin_unlock(chan->lock);
561}
562
563/**
564 * z8530_dma_status - Handle a DMA status exception
565 * @chan: Z8530 channel to process
566 *
567 * A status event occurred on the Z8530. We receive these for two reasons
568 * when in DMA mode. Firstly if we finished a packet transfer we get one
569 * and kick the next packet out. Secondly we may see a DCD change and
570 * have to poke the protocol layer.
571 *
572 */
573
574static void z8530_dma_status(struct z8530_channel *chan)
575{
576 u8 status, altered;
577
578 status=read_zsreg(chan, R0);
579 altered=chan->status^status;
580
581 chan->status=status;
582
583
584 if(chan->dma_tx)
585 {
586 if(status&TxEOM)
587 {
588 unsigned long flags;
589
590 flags=claim_dma_lock();
591 disable_dma(chan->txdma);
592 clear_dma_ff(chan->txdma);
593 chan->txdma_on=0;
594 release_dma_lock(flags);
595 z8530_tx_done(chan);
596 }
597 }
598
599 spin_lock(chan->lock);
600 if(altered&chan->dcdcheck)
601 {
602 if(status&chan->dcdcheck)
603 {
604 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
605 write_zsreg(chan, R3, chan->regs[3]|RxENABLE);
606 if(chan->netdevice &&
607 ((chan->netdevice->type == ARPHRD_HDLC) ||
608 (chan->netdevice->type == ARPHRD_PPP)))
609 sppp_reopen(chan->netdevice);
610 }
611 else
612 {
613 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
614 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE);
615 z8530_flush_fifo(chan);
616 }
617 }
618
619 write_zsctrl(chan, RES_EXT_INT);
620 write_zsctrl(chan, RES_H_IUS);
621 spin_unlock(chan->lock);
622}
623
624struct z8530_irqhandler z8530_dma_sync=
625{
626 z8530_dma_rx,
627 z8530_dma_tx,
628 z8530_dma_status
629};
630
631EXPORT_SYMBOL(z8530_dma_sync);
632
633struct z8530_irqhandler z8530_txdma_sync=
634{
635 z8530_rx,
636 z8530_dma_tx,
637 z8530_dma_status
638};
639
640EXPORT_SYMBOL(z8530_txdma_sync);
641
642/**
643 * z8530_rx_clear - Handle RX events from a stopped chip
644 * @c: Z8530 channel to shut up
645 *
646 * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
647 * For machines with PCI Z85x30 cards, or level triggered interrupts
648 * (eg the MacII) we must clear the interrupt cause or die.
649 */
650
651
652static void z8530_rx_clear(struct z8530_channel *c)
653{
654 /*
655 * Data and status bytes
656 */
657 u8 stat;
658
659 read_zsdata(c);
660 stat=read_zsreg(c, R1);
661
662 if(stat&END_FR)
663 write_zsctrl(c, RES_Rx_CRC);
664 /*
665 * Clear irq
666 */
667 write_zsctrl(c, ERR_RES);
668 write_zsctrl(c, RES_H_IUS);
669}
670
671/**
672 * z8530_tx_clear - Handle TX events from a stopped chip
673 * @c: Z8530 channel to shut up
674 *
675 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
676 * For machines with PCI Z85x30 cards, or level triggered interrupts
677 * (eg the MacII) we must clear the interrupt cause or die.
678 */
679
680static void z8530_tx_clear(struct z8530_channel *c)
681{
682 write_zsctrl(c, RES_Tx_P);
683 write_zsctrl(c, RES_H_IUS);
684}
685
686/**
687 * z8530_status_clear - Handle status events from a stopped chip
688 * @chan: Z8530 channel to shut up
689 *
690 * Status interrupt vectors for a Z8530 that is in 'parked' mode.
691 * For machines with PCI Z85x30 cards, or level triggered interrupts
692 * (eg the MacII) we must clear the interrupt cause or die.
693 */
694
695static void z8530_status_clear(struct z8530_channel *chan)
696{
697 u8 status=read_zsreg(chan, R0);
698 if(status&TxEOM)
699 write_zsctrl(chan, ERR_RES);
700 write_zsctrl(chan, RES_EXT_INT);
701 write_zsctrl(chan, RES_H_IUS);
702}
703
704struct z8530_irqhandler z8530_nop=
705{
706 z8530_rx_clear,
707 z8530_tx_clear,
708 z8530_status_clear
709};
710
711
712EXPORT_SYMBOL(z8530_nop);
713
714/**
715 * z8530_interrupt - Handle an interrupt from a Z8530
716 * @irq: Interrupt number
717 * @dev_id: The Z8530 device that is interrupting.
718 * @regs: unused
719 *
720 * A Z85[2]30 device has stuck its hand in the air for attention.
721 * We scan both the channels on the chip for events and then call
722 * the channel specific call backs for each channel that has events.
723 * We have to use callback functions because the two channels can be
724 * in different modes.
725 *
726 * Locking is done for the handlers. Note that locking is done
727 * at the chip level (the 5uS delay issue is per chip not per
728 * channel). c->lock for both channels points to dev->lock
729 */
730
731irqreturn_t z8530_interrupt(int irq, void *dev_id, struct pt_regs *regs)
732{
733 struct z8530_dev *dev=dev_id;
734 u8 intr;
735 static volatile int locker=0;
736 int work=0;
737 struct z8530_irqhandler *irqs;
738
739 if(locker)
740 {
741 printk(KERN_ERR "IRQ re-enter\n");
742 return IRQ_NONE;
743 }
744 locker=1;
745
746 spin_lock(&dev->lock);
747
748 while(++work<5000)
749 {
750
751 intr = read_zsreg(&dev->chanA, R3);
752 if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
753 break;
754
755 /* This holds the IRQ status. On the 8530 you must read it from chan
756 A even though it applies to the whole chip */
757
758 /* Now walk the chip and see what it is wanting - it may be
759 an IRQ for someone else, remember */
760
761 irqs=dev->chanA.irqs;
762
763 if(intr & (CHARxIP|CHATxIP|CHAEXT))
764 {
765 if(intr&CHARxIP)
766 irqs->rx(&dev->chanA);
767 if(intr&CHATxIP)
768 irqs->tx(&dev->chanA);
769 if(intr&CHAEXT)
770 irqs->status(&dev->chanA);
771 }
772
773 irqs=dev->chanB.irqs;
774
775 if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
776 {
777 if(intr&CHBRxIP)
778 irqs->rx(&dev->chanB);
779 if(intr&CHBTxIP)
780 irqs->tx(&dev->chanB);
781 if(intr&CHBEXT)
782 irqs->status(&dev->chanB);
783 }
784 }
785 spin_unlock(&dev->lock);
786 if(work==5000)
787 printk(KERN_ERR "%s: interrupt jammed - abort(0x%X)!\n", dev->name, intr);
788 /* Ok all done */
789 locker=0;
790 return IRQ_HANDLED;
791}
792
793EXPORT_SYMBOL(z8530_interrupt);
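/*
 * A minimal sketch (not taken from a specific board driver) of hooking
 * this handler up: pass the struct z8530_dev as the dev_id cookie so it
 * can be recovered above, e.g.
 *
 *	if (request_irq(irq, &z8530_interrupt, SA_SHIRQ, "z85230", dev))
 *		goto irq_fail;
 *
 * where irq and the irq_fail label are board-specific and illustrative.
 */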
794
795static char reg_init[16]=
796{
797 0,0,0,0,
798 0,0,0,0,
799 0,0,0,0,
800 0x55,0,0,0
801};
802
803
804/**
805 * z8530_sync_open - Open a Z8530 channel for PIO
806 * @dev: The network interface we are using
807 * @c: The Z8530 channel to open in synchronous PIO mode
808 *
809 * Switch a Z8530 into synchronous mode without DMA assist. We
810 * raise the RTS/DTR and commence network operation.
811 */
812
813int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
814{
815 unsigned long flags;
816
817 spin_lock_irqsave(c->lock, flags);
818
819 c->sync = 1;
820 c->mtu = dev->mtu+64;
821 c->count = 0;
822 c->skb = NULL;
823 c->skb2 = NULL;
824 c->irqs = &z8530_sync;
825
826 /* This loads the double buffer up */
827 z8530_rx_done(c); /* Load the frame ring */
828 z8530_rx_done(c); /* Load the backup frame */
829 z8530_rtsdtr(c,1);
830 c->dma_tx = 0;
831 c->regs[R1]|=TxINT_ENAB;
832 write_zsreg(c, R1, c->regs[R1]);
833 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
834
835 spin_unlock_irqrestore(c->lock, flags);
836 return 0;
837}
838
839
840EXPORT_SYMBOL(z8530_sync_open);
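/*
 * A rough sketch of how the exported pieces fit together in a board
 * driver's open path (netdev and dev are illustrative names, error
 * handling omitted):
 *
 *	z8530_init(dev);
 *	z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
 *	z8530_sync_open(netdev, &dev->chanA);         PIO, or
 *	z8530_sync_dma_open(netdev, &dev->chanA);     full DMA
 *
 * with the matching _close() call made from the device stop path.
 */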
841
842/**
843 * z8530_sync_close - Close a PIO Z8530 channel
844 * @dev: Network device to close
845 * @c: Z8530 channel to disassociate and move to idle
846 *
847 * Close down a Z8530 interface and switch its interrupt handlers
848 * to discard future events.
849 */
850
851int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
852{
853 u8 chk;
854 unsigned long flags;
855
856 spin_lock_irqsave(c->lock, flags);
857 c->irqs = &z8530_nop;
858 c->max = 0;
859 c->sync = 0;
860
861 chk=read_zsreg(c,R0);
862 write_zsreg(c, R3, c->regs[R3]);
863 z8530_rtsdtr(c,0);
864
865 spin_unlock_irqrestore(c->lock, flags);
866 return 0;
867}
868
869EXPORT_SYMBOL(z8530_sync_close);
870
871/**
872 * z8530_sync_dma_open - Open a Z8530 for DMA I/O
873 * @dev: The network device to attach
874 * @c: The Z8530 channel to configure in sync DMA mode.
875 *
876 * Set up a Z85x30 device for synchronous DMA in both directions. Two
877 * ISA DMA channels must be available for this to work. We assume ISA
878 * DMA driven I/O and PC limits on access.
879 */
880
881int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
882{
883 unsigned long cflags, dflags;
884
885 c->sync = 1;
886 c->mtu = dev->mtu+64;
887 c->count = 0;
888 c->skb = NULL;
889 c->skb2 = NULL;
890 /*
891 * Load the DMA interfaces up
892 */
893 c->rxdma_on = 0;
894 c->txdma_on = 0;
895
896 /*
897 * Allocate the DMA flip buffers. Limit by page size.
898 * Everyone runs 1500 mtu or less on wan links so this
899 * should be fine.
900 */
901
902 if(c->mtu > PAGE_SIZE/2)
903 return -EMSGSIZE;
904
905 c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
906 if(c->rx_buf[0]==NULL)
907 return -ENOBUFS;
908 c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
909
910 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
911 if(c->tx_dma_buf[0]==NULL)
912 {
913 free_page((unsigned long)c->rx_buf[0]);
914 c->rx_buf[0]=NULL;
915 return -ENOBUFS;
916 }
917 c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
918
919 c->tx_dma_used=0;
920 c->dma_tx = 1;
921 c->dma_num=0;
922 c->dma_ready=1;
923
924 /*
925 * Enable DMA control mode
926 */
927
928 spin_lock_irqsave(c->lock, cflags);
929
930 /*
931 * TX DMA via DIR/REQ
932 */
933
934 c->regs[R14]|= DTRREQ;
935 write_zsreg(c, R14, c->regs[R14]);
936
937 c->regs[R1]&= ~TxINT_ENAB;
938 write_zsreg(c, R1, c->regs[R1]);
939
940 /*
941 * RX DMA via W/Req
942 */
943
944 c->regs[R1]|= WT_FN_RDYFN;
945 c->regs[R1]|= WT_RDY_RT;
946 c->regs[R1]|= INT_ERR_Rx;
947 c->regs[R1]&= ~TxINT_ENAB;
948 write_zsreg(c, R1, c->regs[R1]);
949 c->regs[R1]|= WT_RDY_ENAB;
950 write_zsreg(c, R1, c->regs[R1]);
951
952 /*
953 * DMA interrupts
954 */
955
956 /*
957 * Set up the DMA configuration
958 */
959
960 dflags=claim_dma_lock();
961
962 disable_dma(c->rxdma);
963 clear_dma_ff(c->rxdma);
964 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
965 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
966 set_dma_count(c->rxdma, c->mtu);
967 enable_dma(c->rxdma);
968
969 disable_dma(c->txdma);
970 clear_dma_ff(c->txdma);
971 set_dma_mode(c->txdma, DMA_MODE_WRITE);
972 disable_dma(c->txdma);
973
974 release_dma_lock(dflags);
975
976 /*
977 * Select the DMA interrupt handlers
978 */
979
980 c->rxdma_on = 1;
981 c->txdma_on = 1;
982 c->tx_dma_used = 1;
983
984 c->irqs = &z8530_dma_sync;
985 z8530_rtsdtr(c,1);
986 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
987
988 spin_unlock_irqrestore(c->lock, cflags);
989
990 return 0;
991}
992
993EXPORT_SYMBOL(z8530_sync_dma_open);
994
995/**
996 * z8530_sync_dma_close - Close down DMA I/O
997 * @dev: Network device to detach
998 * @c: Z8530 channel to move into discard mode
999 *
1000 * Shut down a DMA mode synchronous interface. Halt the DMA, and
1001 * free the buffers.
1002 */
1003
1004int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
1005{
1006 u8 chk;
1007 unsigned long flags;
1008
1009 c->irqs = &z8530_nop;
1010 c->max = 0;
1011 c->sync = 0;
1012
1013 /*
1014 * Disable the PC DMA channels
1015 */
1016
1017 flags=claim_dma_lock();
1018 disable_dma(c->rxdma);
1019 clear_dma_ff(c->rxdma);
1020
1021 c->rxdma_on = 0;
1022
1023 disable_dma(c->txdma);
1024 clear_dma_ff(c->txdma);
1025 release_dma_lock(flags);
1026
1027 c->txdma_on = 0;
1028 c->tx_dma_used = 0;
1029
1030 spin_lock_irqsave(c->lock, flags);
1031
1032 /*
1033 * Disable DMA control mode
1034 */
1035
1036 c->regs[R1]&= ~WT_RDY_ENAB;
1037 write_zsreg(c, R1, c->regs[R1]);
1038 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1039 c->regs[R1]|= INT_ALL_Rx;
1040 write_zsreg(c, R1, c->regs[R1]);
1041 c->regs[R14]&= ~DTRREQ;
1042 write_zsreg(c, R14, c->regs[R14]);
1043
1044 if(c->rx_buf[0])
1045 {
1046 free_page((unsigned long)c->rx_buf[0]);
1047 c->rx_buf[0]=NULL;
1048 }
1049 if(c->tx_dma_buf[0])
1050 {
1051 free_page((unsigned long)c->tx_dma_buf[0]);
1052 c->tx_dma_buf[0]=NULL;
1053 }
1054 chk=read_zsreg(c,R0);
1055 write_zsreg(c, R3, c->regs[R3]);
1056 z8530_rtsdtr(c,0);
1057
1058 spin_unlock_irqrestore(c->lock, flags);
1059
1060 return 0;
1061}
1062
1063EXPORT_SYMBOL(z8530_sync_dma_close);
1064
1065/**
1066 * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
1067 * @dev: The network device to attach
1068 * @c: The Z8530 channel to configure in sync DMA mode.
1069 *
1070 * Set up a Z85x30 device for synchronous DMA transmission. One
1071 * ISA DMA channel must be available for this to work. The receive
1072 * side is run in PIO mode, but then it has the bigger FIFO.
1073 */
1074
1075int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
1076{
1077 unsigned long cflags, dflags;
1078
1079 printk("Opening sync interface for TX-DMA\n");
1080 c->sync = 1;
1081 c->mtu = dev->mtu+64;
1082 c->count = 0;
1083 c->skb = NULL;
1084 c->skb2 = NULL;
1085
1086 /*
1087 * Allocate the DMA flip buffers. Limit by page size.
1088 * Everyone runs 1500 mtu or less on wan links so this
1089 * should be fine.
1090 */
1091
1092 if(c->mtu > PAGE_SIZE/2)
1093 return -EMSGSIZE;
1094
1095 c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
1096 if(c->tx_dma_buf[0]==NULL)
1097 return -ENOBUFS;
1098
1099 c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
1100
1101
1102 spin_lock_irqsave(c->lock, cflags);
1103
1104 /*
1105 * Load the PIO receive ring
1106 */
1107
1108 z8530_rx_done(c);
1109 z8530_rx_done(c);
1110
1111 /*
1112 * Load the DMA interfaces up
1113 */
1114
1115 c->rxdma_on = 0;
1116 c->txdma_on = 0;
1117
1118 c->tx_dma_used=0;
1119 c->dma_num=0;
1120 c->dma_ready=1;
1121 c->dma_tx = 1;
1122
1123 /*
1124 * Enable DMA control mode
1125 */
1126
1127 /*
1128 * TX DMA via DIR/REQ
1129 */
1130 c->regs[R14]|= DTRREQ;
1131 write_zsreg(c, R14, c->regs[R14]);
1132
1133 c->regs[R1]&= ~TxINT_ENAB;
1134 write_zsreg(c, R1, c->regs[R1]);
1135
1136 /*
1137 * Set up the DMA configuration
1138 */
1139
1140 dflags = claim_dma_lock();
1141
1142 disable_dma(c->txdma);
1143 clear_dma_ff(c->txdma);
1144 set_dma_mode(c->txdma, DMA_MODE_WRITE);
1145 disable_dma(c->txdma);
1146
1147 release_dma_lock(dflags);
1148
1149 /*
1150 * Select the DMA interrupt handlers
1151 */
1152
1153 c->rxdma_on = 0;
1154 c->txdma_on = 1;
1155 c->tx_dma_used = 1;
1156
1157 c->irqs = &z8530_txdma_sync;
1158 z8530_rtsdtr(c,1);
1159 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1160 spin_unlock_irqrestore(c->lock, cflags);
1161
1162 return 0;
1163}
1164
1165EXPORT_SYMBOL(z8530_sync_txdma_open);
1166
1167/**
1168 * z8530_sync_txdma_close - Close down a TX driven DMA channel
1169 * @dev: Network device to detach
1170 * @c: Z8530 channel to move into discard mode
1171 *
1172 * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
1173 * and free the buffers.
1174 */
1175
1176int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
1177{
1178 unsigned long dflags, cflags;
1179 u8 chk;
1180
1181
1182 spin_lock_irqsave(c->lock, cflags);
1183
1184 c->irqs = &z8530_nop;
1185 c->max = 0;
1186 c->sync = 0;
1187
1188 /*
1189 * Disable the PC DMA channels
1190 */
1191
1192 dflags = claim_dma_lock();
1193
1194 disable_dma(c->txdma);
1195 clear_dma_ff(c->txdma);
1196 c->txdma_on = 0;
1197 c->tx_dma_used = 0;
1198
1199 release_dma_lock(dflags);
1200
1201 /*
1202 * Disable DMA control mode
1203 */
1204
1205 c->regs[R1]&= ~WT_RDY_ENAB;
1206 write_zsreg(c, R1, c->regs[R1]);
1207 c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
1208 c->regs[R1]|= INT_ALL_Rx;
1209 write_zsreg(c, R1, c->regs[R1]);
1210 c->regs[R14]&= ~DTRREQ;
1211 write_zsreg(c, R14, c->regs[R14]);
1212
1213 if(c->tx_dma_buf[0])
1214 {
1215 free_page((unsigned long)c->tx_dma_buf[0]);
1216 c->tx_dma_buf[0]=NULL;
1217 }
1218 chk=read_zsreg(c,R0);
1219 write_zsreg(c, R3, c->regs[R3]);
1220 z8530_rtsdtr(c,0);
1221
1222 spin_unlock_irqrestore(c->lock, cflags);
1223 return 0;
1224}
1225
1226
1227EXPORT_SYMBOL(z8530_sync_txdma_close);
1228
1229
1230/*
1231 * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
1232 * it exists...
1233 */
1234
1235static char *z8530_type_name[]={
1236 "Z8530",
1237 "Z85C30",
1238 "Z85230"
1239};
1240
1241/**
1242 * z8530_describe - Uniformly describe a Z8530 port
1243 * @dev: Z8530 device to describe
1244 * @mapping: string holding mapping type (eg "I/O" or "Mem")
1245 * @io: the port value in question
1246 *
1247 * Describe a Z8530 in a standard format. We must pass the I/O as
1248 * the port offset isn't predictable. The main reason for this function
1249 * is to try and get a common format of report.
1250 */
1251
1252void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
1253{
1254 printk(KERN_INFO "%s: %s found at %s 0x%lX, IRQ %d.\n",
1255 dev->name,
1256 z8530_type_name[dev->type],
1257 mapping,
1258 Z8530_PORT_OF(io),
1259 dev->irq);
1260}
1261
1262EXPORT_SYMBOL(z8530_describe);
1263
1264/*
1265 * Locked operation part of the z8530 init code
1266 */
1267
1268static inline int do_z8530_init(struct z8530_dev *dev)
1269{
1270 /* NOP the interrupt handlers first - we might get a
1271 floating IRQ transition when we reset the chip */
1272 dev->chanA.irqs=&z8530_nop;
1273 dev->chanB.irqs=&z8530_nop;
1274 dev->chanA.dcdcheck=DCD;
1275 dev->chanB.dcdcheck=DCD;
1276
1277 /* Reset the chip */
1278 write_zsreg(&dev->chanA, R9, 0xC0);
1279 udelay(200);
1280 /* Now check its valid */
1281 write_zsreg(&dev->chanA, R12, 0xAA);
1282 if(read_zsreg(&dev->chanA, R12)!=0xAA)
1283 return -ENODEV;
1284 write_zsreg(&dev->chanA, R12, 0x55);
1285 if(read_zsreg(&dev->chanA, R12)!=0x55)
1286 return -ENODEV;
1287
1288 dev->type=Z8530;
1289
1290 /*
1291 * See the application note.
1292 */
1293
1294 write_zsreg(&dev->chanA, R15, 0x01);
1295
1296 /*
1297 * If we can set the low bit of R15 then
1298 * the chip is enhanced.
1299 */
1300
1301 if(read_zsreg(&dev->chanA, R15)==0x01)
1302 {
1303 /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1304 /* Put a char in the fifo */
1305 write_zsreg(&dev->chanA, R8, 0);
1306 if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1307 dev->type = Z85230; /* Has a FIFO */
1308 else
1309 dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
1310 }
1311
1312 /*
1313 * The code assumes R7' and friends are
1314 * off. Use write_zsext() for these and keep
1315 * this bit clear.
1316 */
1317
1318 write_zsreg(&dev->chanA, R15, 0);
1319
1320 /*
1321 * At this point it looks like the chip is behaving
1322 */
1323
1324 memcpy(dev->chanA.regs, reg_init, 16);
1325 memcpy(dev->chanB.regs, reg_init ,16);
1326
1327 return 0;
1328}
1329
1330/**
1331 * z8530_init - Initialise a Z8530 device
1332 * @dev: Z8530 device to initialise.
1333 *
1334 * Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
1335 * is present, identify the type and then program it to hopefully
1336 * keep quiet and behave. This matters a lot, a Z8530 in the wrong
1337 * state will sometimes get into stupid modes generating 10kHz
1338 * interrupt streams and the like.
1339 *
1340 * We set the interrupt handler up to discard any events, in case
1341 * we get them during reset or setup.
1342 *
1343 * Return 0 for success, or a negative value indicating the problem
1344 * in errno form.
1345 */
1346
1347int z8530_init(struct z8530_dev *dev)
1348{
1349 unsigned long flags;
1350 int ret;
1351
1352 /* Set up the chip level lock */
1353 spin_lock_init(&dev->lock);
1354 dev->chanA.lock = &dev->lock;
1355 dev->chanB.lock = &dev->lock;
1356
1357 spin_lock_irqsave(&dev->lock, flags);
1358 ret = do_z8530_init(dev);
1359 spin_unlock_irqrestore(&dev->lock, flags);
1360
1361 return ret;
1362}
1363
1364
1365EXPORT_SYMBOL(z8530_init);
1366
1367/**
1368 * z8530_shutdown - Shutdown a Z8530 device
1369 * @dev: The Z8530 chip to shutdown
1370 *
1371 * We set the interrupt handlers to silence any interrupts. We then
1372 * reset the chip and wait 100uS to be sure the reset completed. Just
1373 * in case the caller then tries to do stuff.
1374 *
1375 * This is called without the lock held
1376 */
1377
1378int z8530_shutdown(struct z8530_dev *dev)
1379{
1380 unsigned long flags;
1381 /* Reset the chip */
1382
1383 spin_lock_irqsave(&dev->lock, flags);
1384 dev->chanA.irqs=&z8530_nop;
1385 dev->chanB.irqs=&z8530_nop;
1386 write_zsreg(&dev->chanA, R9, 0xC0);
1387 /* We must lock the udelay, the chip is offlimits here */
1388 udelay(100);
1389 spin_unlock_irqrestore(&dev->lock, flags);
1390 return 0;
1391}
1392
1393EXPORT_SYMBOL(z8530_shutdown);
1394
1395/**
1396 * z8530_channel_load - Load channel data
1397 * @c: Z8530 channel to configure
1398 * @rtable: table of register, value pairs
1399 * FIXME: ioctl to allow user uploaded tables
1400 *
1401 * Load a Z8530 channel up from the system data. We use +16 to
1402 * indicate the "prime" registers. The value 255 terminates the
1403 * table.
1404 */
1405
1406int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
1407{
1408 unsigned long flags;
1409
1410 spin_lock_irqsave(c->lock, flags);
1411
1412 while(*rtable!=255)
1413 {
1414 int reg=*rtable++;
1415 if(reg>0x0F)
1416 write_zsreg(c, R15, c->regs[15]|1);
1417 write_zsreg(c, reg&0x0F, *rtable);
1418 if(reg>0x0F)
1419 write_zsreg(c, R15, c->regs[15]&~1);
1420 c->regs[reg]=*rtable++;
1421 }
1422 c->rx_function=z8530_null_rx;
1423 c->skb=NULL;
1424 c->tx_skb=NULL;
1425 c->tx_next_skb=NULL;
1426 c->mtu=1500;
1427 c->max=0;
1428 c->count=0;
1429 c->status=read_zsreg(c, R0);
1430 c->sync=1;
1431 write_zsreg(c, R3, c->regs[R3]|RxENABLE);
1432
1433 spin_unlock_irqrestore(c->lock, flags);
1434 return 0;
1435}
1436
1437EXPORT_SYMBOL(z8530_channel_load);
1438
1439
1440/**
1441 * z8530_tx_begin - Begin packet transmission
1442 * @c: The Z8530 channel to kick
1443 *
1444 * This is the speed sensitive side of transmission. If we are called
1445 * and no buffer is being transmitted we commence the next buffer. If
1446 * nothing is queued we idle the sync.
1447 *
1448 * Note: We are handling this code path in the interrupt path, keep it
1449 * fast or bad things will happen.
1450 *
1451 * Called with the lock held.
1452 */
1453
1454static void z8530_tx_begin(struct z8530_channel *c)
1455{
1456 unsigned long flags;
1457 if(c->tx_skb)
1458 return;
1459
1460 c->tx_skb=c->tx_next_skb;
1461 c->tx_next_skb=NULL;
1462 c->tx_ptr=c->tx_next_ptr;
1463
1464 if(c->tx_skb==NULL)
1465 {
1466 /* Idle on */
1467 if(c->dma_tx)
1468 {
1469 flags=claim_dma_lock();
1470 disable_dma(c->txdma);
1471 /*
1472 * Check if we crapped out.
1473 */
1474 if(get_dma_residue(c->txdma))
1475 {
1476 c->stats.tx_dropped++;
1477 c->stats.tx_fifo_errors++;
1478 }
1479 release_dma_lock(flags);
1480 }
1481 c->txcount=0;
1482 }
1483 else
1484 {
1485 c->txcount=c->tx_skb->len;
1486
1487
1488 if(c->dma_tx)
1489 {
1490 /*
1491 * FIXME. DMA is broken for the original 8530,
1492 * on the older parts we need to set a flag and
1493 * wait for a further TX interrupt to fire this
1494 * stage off
1495 */
1496
1497 flags=claim_dma_lock();
1498 disable_dma(c->txdma);
1499
1500 /*
1501 * These two are needed by the 8530/85C30
1502 * and must be issued when idling.
1503 */
1504
1505 if(c->dev->type!=Z85230)
1506 {
1507 write_zsctrl(c, RES_Tx_CRC);
1508 write_zsctrl(c, RES_EOM_L);
1509 }
1510 write_zsreg(c, R10, c->regs[10]&~ABUNDER);
1511 clear_dma_ff(c->txdma);
1512 set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
1513 set_dma_count(c->txdma, c->txcount);
1514 enable_dma(c->txdma);
1515 release_dma_lock(flags);
1516 write_zsctrl(c, RES_EOM_L);
1517 write_zsreg(c, R5, c->regs[R5]|TxENAB);
1518 }
1519 else
1520 {
1521
1522 /* ABUNDER off */
1523 write_zsreg(c, R10, c->regs[10]);
1524 write_zsctrl(c, RES_Tx_CRC);
1525
1526 while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
1527 {
1528 write_zsreg(c, R8, *c->tx_ptr++);
1529 c->txcount--;
1530 }
1531
1532 }
1533 }
1534 /*
1535 * Since we emptied tx_skb we can ask for more
1536 */
1537 netif_wake_queue(c->netdevice);
1538}
1539
1540/**
1541 * z8530_tx_done - TX complete callback
1542 * @c: The channel that completed a transmit.
1543 *
1544 * This is called when we complete a packet send. We wake the queue,
1545 * start the next packet going and then free the buffer of the existing
1546 * packet. This code is fairly timing sensitive.
1547 *
1548 * Called with the register lock held.
1549 */
1550
1551static void z8530_tx_done(struct z8530_channel *c)
1552{
1553 struct sk_buff *skb;
1554
1555 /* Actually this can happen.*/
1556 if(c->tx_skb==NULL)
1557 return;
1558
1559 skb=c->tx_skb;
1560 c->tx_skb=NULL;
1561 z8530_tx_begin(c);
1562 c->stats.tx_packets++;
1563 c->stats.tx_bytes+=skb->len;
1564 dev_kfree_skb_irq(skb);
1565}
1566
1567/**
1568 * z8530_null_rx - Discard a packet
1569 * @c: The channel the packet arrived on
1570 * @skb: The buffer
1571 *
1572 * We point the receive handler at this function when idle. Instead
1573 * of syncppp processing the frames we get to throw them away.
1574 */
1575
1576void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
1577{
1578 dev_kfree_skb_any(skb);
1579}
1580
1581EXPORT_SYMBOL(z8530_null_rx);
1582
1583/**
1584 * z8530_rx_done - Receive completion callback
1585 * @c: The channel that completed a receive
1586 *
1587 * A new packet is complete. Our goal here is to get back into receive
1588 * mode as fast as possible. On the Z85230 we could change to using
1589 * ESCC mode, but on the older chips we have no choice. We flip to the
1590 * new buffer immediately in DMA mode so that the DMA of the next
1591 * frame can occur while we are copying the previous buffer to an sk_buff
1592 *
1593 * Called with the lock held
1594 */
1595
1596static void z8530_rx_done(struct z8530_channel *c)
1597{
1598 struct sk_buff *skb;
1599 int ct;
1600
1601 /*
1602 * Is our receive engine in DMA mode
1603 */
1604
1605 if(c->rxdma_on)
1606 {
1607 /*
1608 * Save the ready state and the buffer currently
1609 * being used as the DMA target
1610 */
1611
1612 int ready=c->dma_ready;
1613 unsigned char *rxb=c->rx_buf[c->dma_num];
1614 unsigned long flags;
1615
1616 /*
1617 * Complete this DMA. Necessary to find the length
1618 */
1619
1620 flags=claim_dma_lock();
1621
1622 disable_dma(c->rxdma);
1623 clear_dma_ff(c->rxdma);
1624 c->rxdma_on=0;
1625 ct=c->mtu-get_dma_residue(c->rxdma);
1626 if(ct<0)
1627 ct=2; /* Shit happens.. */
1628 c->dma_ready=0;
1629
1630 /*
1631 * Normal case: the other slot is free, start the next DMA
1632 * into it immediately.
1633 */
1634
1635 if(ready)
1636 {
1637 c->dma_num^=1;
1638 set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
1639 set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
1640 set_dma_count(c->rxdma, c->mtu);
1641 c->rxdma_on = 1;
1642 enable_dma(c->rxdma);
1643 /* Stop any frames that we missed the head of
1644 from passing */
1645 write_zsreg(c, R0, RES_Rx_CRC);
1646 }
1647 else
1648 /* Can't occur as we don't re-enable the DMA irq until
1649 after the flip is done */
1650 printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name);
1651
1652 release_dma_lock(flags);
1653
1654 /*
1655 * Shove the old buffer into an sk_buff. We can't DMA
1656 * directly into one on a PC - it might be above the 16Mb
1657 * boundary. Optimisation - we could check to see if we
1658 * can avoid the copy. Optimisation 2 - make the memcpy
1659 * a copychecksum.
1660 */
1661
1662 skb=dev_alloc_skb(ct);
1663 if(skb==NULL)
1664 {
1665 c->stats.rx_dropped++;
1666 printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name);
1667 }
1668 else
1669 {
1670 skb_put(skb, ct);
1671 memcpy(skb->data, rxb, ct);
1672 c->stats.rx_packets++;
1673 c->stats.rx_bytes+=ct;
1674 }
1675 c->dma_ready=1;
1676 }
1677 else
1678 {
1679 RT_LOCK;
1680 skb=c->skb;
1681
1682 /*
1683 * The game we play for non DMA is similar. We want to
1684 * get the controller set up for the next packet as fast
1685 * as possible. We potentially only have one byte + the
1686 * fifo length for this. Thus we want to flip to the new
1687 * buffer and then mess around copying and allocating
1688 * things. For the current case it doesn't matter but
1689 * if you build a system where the sync irq isn't blocked
1690 * by the kernel IRQ disable then you need only block the
1691 * sync IRQ for the RT_LOCK area.
1692 *
1693 */
1694 ct=c->count;
1695
1696 c->skb = c->skb2;
1697 c->count = 0;
1698 c->max = c->mtu;
1699 if(c->skb)
1700 {
1701 c->dptr = c->skb->data;
1702 c->max = c->mtu;
1703 }
1704 else
1705 {
1706 c->count= 0;
1707 c->max = 0;
1708 }
1709 RT_UNLOCK;
1710
1711 c->skb2 = dev_alloc_skb(c->mtu);
1712 if(c->skb2==NULL)
1713 printk(KERN_WARNING "%s: memory squeeze.\n",
1714 c->netdevice->name);
1715 else
1716 {
1717 skb_put(c->skb2,c->mtu);
1718 }
1719 c->stats.rx_packets++;
1720 c->stats.rx_bytes+=ct;
1721
1722 }
1723 /*
1724 * If we received a frame we must now process it.
1725 */
1726 if(skb)
1727 {
1728 skb_trim(skb, ct);
1729 c->rx_function(c,skb);
1730 }
1731 else
1732 {
1733 c->stats.rx_dropped++;
1734 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1735 }
1736}
1737
1738/**
1739 * spans_boundary - Check a packet can be ISA DMA'd
1740 * @skb: The buffer to check
1741 *
1742 * Returns true if the buffer crosses a DMA boundary on a PC. The poor
1743 * thing can only DMA within a 64K block not across the edges of it.
1744 */
1745
1746static inline int spans_boundary(struct sk_buff *skb)
1747{
1748 unsigned long a=(unsigned long)skb->data;
1749 a^=(a+skb->len);
1750 if(a&0x00010000) /* If the 64K bit is different.. */
1751 return 1;
1752 return 0;
1753}
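
The XOR test above deserves a note: the start address and the one-past-the-end address are XORed, so bit 16 of the result is set exactly when the two addresses fall in different 64K pages, which is the case the ISA DMA controller cannot handle. A stand-alone illustration with made-up addresses (ordinary user-space C, not part of the driver):

#include <stdio.h>

/* Same test as spans_boundary(), applied to raw numbers instead of an sk_buff. */
static int crosses_64k(unsigned long start, unsigned long len)
{
	return ((start ^ (start + len)) & 0x00010000) != 0;
}

int main(void)
{
	/* Hypothetical buffer placements, for illustration only. */
	printf("%d\n", crosses_64k(0x0000F000, 0x0800)); /* 0xF000..0xF800 stays below 0x10000 -> 0 */
	printf("%d\n", crosses_64k(0x0000FF00, 0x0800)); /* 0xFF00..0x10700 crosses the 64K edge -> 1 */
	return 0;
}
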
1754
1755/**
1756 * z8530_queue_xmit - Queue a packet
1757 * @c: The channel to use
1758 * @skb: The packet to kick down the channel
1759 *
1760 *	Queue a packet for transmission. Because we have rather
1761 *	hard to hit interrupt latencies for the Z85230 per packet,
1762 *	even in DMA mode we do the flip to the DMA buffer, if needed,
1763 *	here and not in the IRQ.
1764 *
1765 * Called from the network code. The lock is not held at this
1766 * point.
1767 */
1768
1769int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1770{
1771 unsigned long flags;
1772
1773 netif_stop_queue(c->netdevice);
1774 if(c->tx_next_skb)
1775 {
1776 return 1;
1777 }
1778
1779 /* PC SPECIFIC - DMA limits */
1780
1781 /*
1782	 *	If we will DMA the transmit and it's gone over the ISA bus
1783	 *	limit, then copy to the flip buffer
1784 */
1785
1786 if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
1787 {
1788 /*
1789 * Send the flip buffer, and flip the flippy bit.
1790	 *	We don't care which is used when, just so long as
1791 * we never use the same buffer twice in a row. Since
1792 * only one buffer can be going out at a time the other
1793 * has to be safe.
1794 */
1795 c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
1796 c->tx_dma_used^=1; /* Flip temp buffer */
1797 memcpy(c->tx_next_ptr, skb->data, skb->len);
1798 }
1799 else
1800 c->tx_next_ptr=skb->data;
1801 RT_LOCK;
1802 c->tx_next_skb=skb;
1803 RT_UNLOCK;
1804
1805 spin_lock_irqsave(c->lock, flags);
1806 z8530_tx_begin(c);
1807 spin_unlock_irqrestore(c->lock, flags);
1808
1809 return 0;
1810}
1811
1812EXPORT_SYMBOL(z8530_queue_xmit);
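
For orientation, a board driver built on this core usually points its hard_start_xmit at a one-line wrapper around z8530_queue_xmit(). A hedged sketch of such glue follows; "struct my_card" and its layout are placeholders, not taken from any in-tree driver.

/* Illustrative glue only: the card structure and field names are hypothetical. */
struct my_card {
	struct z8530_dev sync;		/* the SCC this port lives on */
};

static int my_card_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_card *card = dev->priv;

	/* The core copies into its own bounce buffer when the frame cannot
	 * be ISA-DMA'd in place, so we simply hand the skb over. */
	return z8530_queue_xmit(&card->sync.chanA, skb);
}
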
1813
1814/**
1815 * z8530_get_stats - Get network statistics
1816 * @c: The channel to use
1817 *
1818 * Get the statistics block. We keep the statistics in software as
1819 * the chip doesn't do it for us.
1820 *
1821 *	Locking is ignored here - we could lock for a copy but it's
1822 *	not likely to be that big an issue
1823 */
1824
1825struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
1826{
1827 return &c->stats;
1828}
1829
1830EXPORT_SYMBOL(z8530_get_stats);
1831
1832/*
1833 * Module support
1834 */
1835static char banner[] __initdata = KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
1836
1837static int __init z85230_init_driver(void)
1838{
1839 printk(banner);
1840 return 0;
1841}
1842module_init(z85230_init_driver);
1843
1844static void __exit z85230_cleanup_driver(void)
1845{
1846}
1847module_exit(z85230_cleanup_driver);
1848
1849MODULE_AUTHOR("Red Hat Inc.");
1850MODULE_DESCRIPTION("Z85x30 synchronous driver core");
1851MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
new file mode 100644
index 000000000000..77e53208045f
--- /dev/null
+++ b/drivers/net/wan/z85230.h
@@ -0,0 +1,449 @@
1/*
2 *	Description of the Z8530, Z85C30 and Z85230 communications chips
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Alan Cox <alan@redhat.com>
6 */
7
8#ifndef _Z8530_H
9#define _Z8530_H
10
11#include <linux/tty.h>
12#include <linux/interrupt.h>
13
14/* Conversion routines between BRG time constants and bits
15 * per second.
16 */
17#define BRG_TO_BPS(brg, freq) ((freq) / 2 / ((brg) + 2))
18#define BPS_TO_BRG(bps, freq) ((((freq) + (bps)) / (2 * (bps))) - 2)
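
A worked example, assuming an illustrative 4.9152 MHz clock (the real PCLK/RTxC frequency depends on the board): BPS_TO_BRG(9600, 4915200) = ((4915200 + 9600) / 19200) - 2 = 254, and feeding that back gives BRG_TO_BPS(254, 4915200) = 4915200 / 2 / (254 + 2) = 9600, so the pair round-trips exactly at this rate.
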
19
20/* The Zilog register set */
21
22#define FLAG 0x7e
23
24/* Write Register 0 */
25#define R0 0 /* Register selects */
26#define R1 1
27#define R2 2
28#define R3 3
29#define R4 4
30#define R5 5
31#define R6 6
32#define R7 7
33#define R8 8
34#define R9 9
35#define R10 10
36#define R11 11
37#define R12 12
38#define R13 13
39#define R14 14
40#define R15 15
41
42#define RPRIME 16 /* Indicate a prime register access on 230 */
43
44#define NULLCODE 0 /* Null Code */
45#define POINT_HIGH 0x8 /* Select upper half of registers */
46#define RES_EXT_INT 0x10 /* Reset Ext. Status Interrupts */
47#define SEND_ABORT 0x18 /* HDLC Abort */
48#define RES_RxINT_FC 0x20 /* Reset RxINT on First Character */
49#define RES_Tx_P 0x28 /* Reset TxINT Pending */
50#define ERR_RES 0x30 /* Error Reset */
51#define RES_H_IUS 0x38 /* Reset highest IUS */
52
53#define RES_Rx_CRC 0x40 /* Reset Rx CRC Checker */
54#define RES_Tx_CRC 0x80 /* Reset Tx CRC Checker */
55#define RES_EOM_L 0xC0 /* Reset EOM latch */
56
57/* Write Register 1 */
58
59#define EXT_INT_ENAB 0x1 /* Ext Int Enable */
60#define TxINT_ENAB 0x2 /* Tx Int Enable */
61#define PAR_SPEC 0x4 /* Parity is special condition */
62
63#define RxINT_DISAB 0 /* Rx Int Disable */
64#define RxINT_FCERR 0x8 /* Rx Int on First Character Only or Error */
65#define INT_ALL_Rx 0x10 /* Int on all Rx Characters or error */
66#define INT_ERR_Rx 0x18 /* Int on error only */
67
68#define WT_RDY_RT 0x20 /* Wait/Ready on R/T */
69#define WT_FN_RDYFN 0x40 /* Wait/FN/Ready FN */
70#define WT_RDY_ENAB 0x80 /* Wait/Ready Enable */
71
72/* Write Register #2 (Interrupt Vector) */
73
74/* Write Register 3 */
75
76#define RxENABLE 0x1 /* Rx Enable */
77#define SYNC_L_INH 0x2 /* Sync Character Load Inhibit */
78#define ADD_SM 0x4 /* Address Search Mode (SDLC) */
79#define RxCRC_ENAB 0x8 /* Rx CRC Enable */
80#define ENT_HM 0x10 /* Enter Hunt Mode */
81#define AUTO_ENAB 0x20 /* Auto Enables */
82#define Rx5 0x0 /* Rx 5 Bits/Character */
83#define Rx7 0x40 /* Rx 7 Bits/Character */
84#define Rx6 0x80 /* Rx 6 Bits/Character */
85#define Rx8 0xc0 /* Rx 8 Bits/Character */
86
87/* Write Register 4 */
88
89#define PAR_ENA 0x1 /* Parity Enable */
90#define PAR_EVEN 0x2 /* Parity Even/Odd* */
91
92#define SYNC_ENAB 0 /* Sync Modes Enable */
93#define SB1 0x4 /* 1 stop bit/char */
94#define SB15 0x8 /* 1.5 stop bits/char */
95#define SB2 0xc /* 2 stop bits/char */
96
97#define MONSYNC 0 /* 8 Bit Sync character */
98#define BISYNC 0x10 /* 16 bit sync character */
99#define SDLC 0x20 /* SDLC Mode (01111110 Sync Flag) */
100#define EXTSYNC 0x30 /* External Sync Mode */
101
102#define X1CLK 0x0 /* x1 clock mode */
103#define X16CLK 0x40 /* x16 clock mode */
104#define X32CLK 0x80 /* x32 clock mode */
105#define X64CLK 0xC0 /* x64 clock mode */
106
107/* Write Register 5 */
108
109#define TxCRC_ENAB 0x1 /* Tx CRC Enable */
110#define RTS 0x2 /* RTS */
111#define SDLC_CRC 0x4 /* SDLC/CRC-16 */
112#define TxENAB 0x8 /* Tx Enable */
113#define SND_BRK 0x10 /* Send Break */
114#define Tx5 0x0 /* Tx 5 bits (or less)/character */
115#define Tx7 0x20 /* Tx 7 bits/character */
116#define Tx6 0x40 /* Tx 6 bits/character */
117#define Tx8 0x60 /* Tx 8 bits/character */
118#define DTR 0x80 /* DTR */
119
120/* Write Register 6 (Sync bits 0-7/SDLC Address Field) */
121
122/* Write Register 7 (Sync bits 8-15/SDLC 01111110) */
123
124/* Write Register 8 (transmit buffer) */
125
126/* Write Register 9 (Master interrupt control) */
127#define VIS 1 /* Vector Includes Status */
128#define NV 2 /* No Vector */
129#define DLC 4 /* Disable Lower Chain */
130#define MIE 8 /* Master Interrupt Enable */
131#define STATHI 0x10 /* Status high */
132#define NORESET 0 /* No reset on write to R9 */
133#define CHRB 0x40 /* Reset channel B */
134#define CHRA 0x80 /* Reset channel A */
135#define FHWRES 0xc0 /* Force hardware reset */
136
137/* Write Register 10 (misc control bits) */
138#define BIT6 1 /* 6 bit/8bit sync */
139#define LOOPMODE 2 /* SDLC Loop mode */
140#define ABUNDER 4 /* Abort/flag on SDLC xmit underrun */
141#define MARKIDLE 8 /* Mark/flag on idle */
142#define GAOP 0x10 /* Go active on poll */
143#define NRZ 0 /* NRZ mode */
144#define NRZI 0x20 /* NRZI mode */
145#define FM1 0x40 /* FM1 (transition = 1) */
146#define FM0 0x60 /* FM0 (transition = 0) */
147#define CRCPS 0x80 /* CRC Preset I/O */
148
149/* Write Register 11 (Clock Mode control) */
150#define TRxCXT 0 /* TRxC = Xtal output */
151#define TRxCTC 1 /* TRxC = Transmit clock */
152#define TRxCBR 2 /* TRxC = BR Generator Output */
153#define TRxCDP 3 /* TRxC = DPLL output */
154#define TRxCOI 4 /* TRxC O/I */
155#define TCRTxCP 0 /* Transmit clock = RTxC pin */
156#define TCTRxCP 8 /* Transmit clock = TRxC pin */
157#define TCBR 0x10 /* Transmit clock = BR Generator output */
158#define TCDPLL 0x18 /* Transmit clock = DPLL output */
159#define RCRTxCP 0 /* Receive clock = RTxC pin */
160#define RCTRxCP 0x20 /* Receive clock = TRxC pin */
161#define RCBR 0x40 /* Receive clock = BR Generator output */
162#define RCDPLL 0x60 /* Receive clock = DPLL output */
163#define RTxCX 0x80 /* RTxC Xtal/No Xtal */
164
165/* Write Register 12 (lower byte of baud rate generator time constant) */
166
167/* Write Register 13 (upper byte of baud rate generator time constant) */
168
169/* Write Register 14 (Misc control bits) */
170#define BRENABL 1 /* Baud rate generator enable */
171#define BRSRC 2 /* Baud rate generator source */
172#define DTRREQ 4 /* DTR/Request function */
173#define AUTOECHO 8 /* Auto Echo */
174#define LOOPBAK 0x10 /* Local loopback */
175#define SEARCH 0x20 /* Enter search mode */
176#define RMC 0x40 /* Reset missing clock */
177#define DISDPLL 0x60 /* Disable DPLL */
178#define SSBR 0x80 /* Set DPLL source = BR generator */
179#define SSRTxC 0xa0 /* Set DPLL source = RTxC */
180#define SFMM 0xc0 /* Set FM mode */
181#define SNRZI 0xe0 /* Set NRZI mode */
182
183/* Write Register 15 (external/status interrupt control) */
184#define PRIME 1 /* R5' etc register access (Z85C30/230 only) */
185#define ZCIE 2 /* Zero count IE */
186#define FIFOE 4 /* Z85230 only */
187#define DCDIE 8 /* DCD IE */
188#define SYNCIE 0x10 /* Sync/hunt IE */
189#define CTSIE 0x20 /* CTS IE */
190#define TxUIE 0x40 /* Tx Underrun/EOM IE */
191#define BRKIE 0x80 /* Break/Abort IE */
192
193
194/* Read Register 0 */
195#define Rx_CH_AV 0x1 /* Rx Character Available */
196#define ZCOUNT 0x2 /* Zero count */
197#define Tx_BUF_EMP 0x4 /* Tx Buffer empty */
198#define DCD 0x8 /* DCD */
199#define SYNC_HUNT 0x10 /* Sync/hunt */
200#define CTS 0x20 /* CTS */
201#define TxEOM 0x40 /* Tx underrun */
202#define BRK_ABRT 0x80 /* Break/Abort */
203
204/* Read Register 1 */
205#define ALL_SNT 0x1 /* All sent */
206/* Residue Data for 8 Rx bits/char programmed */
207#define RES3 0x8 /* 0/3 */
208#define RES4 0x4 /* 0/4 */
209#define RES5 0xc /* 0/5 */
210#define RES6 0x2 /* 0/6 */
211#define RES7 0xa /* 0/7 */
212#define RES8 0x6 /* 0/8 */
213#define RES18 0xe /* 1/8 */
214#define RES28 0x0 /* 2/8 */
215/* Special Rx Condition Interrupts */
216#define PAR_ERR 0x10 /* Parity error */
217#define Rx_OVR 0x20 /* Rx Overrun Error */
218#define CRC_ERR 0x40 /* CRC/Framing Error */
219#define END_FR 0x80 /* End of Frame (SDLC) */
220
221/* Read Register 2 (channel B only) - Interrupt vector */
222
223/* Read Register 3 (interrupt pending register) channel A only */
224#define CHBEXT 0x1 /* Channel B Ext/Stat IP */
225#define CHBTxIP 0x2 /* Channel B Tx IP */
226#define CHBRxIP 0x4 /* Channel B Rx IP */
227#define CHAEXT 0x8 /* Channel A Ext/Stat IP */
228#define CHATxIP 0x10 /* Channel A Tx IP */
229#define CHARxIP 0x20 /* Channel A Rx IP */
230
231/* Read Register 8 (receive data register) */
232
233/* Read Register 10 (misc status bits) */
234#define ONLOOP 2 /* On loop */
235#define LOOPSEND 0x10 /* Loop sending */
236#define CLK2MIS 0x40 /* Two clocks missing */
237#define CLK1MIS 0x80 /* One clock missing */
238
239/* Read Register 12 (lower byte of baud rate generator constant) */
240
241/* Read Register 13 (upper byte of baud rate generator constant) */
242
243/* Read Register 15 (value of WR 15) */
244
245
246/*
247 * Interrupt handling functions for this SCC
248 */
249
250struct z8530_channel;
251
252struct z8530_irqhandler
253{
254 void (*rx)(struct z8530_channel *);
255 void (*tx)(struct z8530_channel *);
256 void (*status)(struct z8530_channel *);
257};
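
The three function pointers above form the per-channel dispatch table consumed by z8530_interrupt(); switching a channel's behaviour amounts to swapping the whole table. A hedged illustration, using the z8530_sync and z8530_nop handler sets declared further down in this header:

/* Illustration only: parking a channel on the no-op handlers and restoring
 * the synchronous PIO ones. The core manages this itself on open/shutdown;
 * a board driver does not normally need to touch c->irqs directly. */
static void example_park_channel(struct z8530_channel *c)
{
	c->irqs = &z8530_nop;		/* rx/tx/status events are discarded */
}

static void example_resume_sync(struct z8530_channel *c)
{
	c->irqs = &z8530_sync;		/* normal synchronous handlers */
}
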
258
259/*
260 * A channel of the Z8530
261 */
262
263struct z8530_channel
264{
265 struct z8530_irqhandler *irqs; /* IRQ handlers */
266 /*
267 * Synchronous
268 */
269	u16		count;		/* Bytes received */
270 u16 max; /* Most we can receive this frame */
271 u16 mtu; /* MTU of the device */
272 u8 *dptr; /* Pointer into rx buffer */
273 struct sk_buff *skb; /* Buffer dptr points into */
274 struct sk_buff *skb2; /* Pending buffer */
275 u8 status; /* Current DCD */
276 u8 dcdcheck; /* which bit to check for line */
277 u8 sync; /* Set if in sync mode */
278
279 u8 regs[32]; /* Register map for the chip */
280 u8 pendregs[32]; /* Pending register values */
281
282 struct sk_buff *tx_skb; /* Buffer being transmitted */
283 struct sk_buff *tx_next_skb; /* Next transmit buffer */
284 u8 *tx_ptr; /* Byte pointer into the buffer */
285 u8 *tx_next_ptr; /* Next pointer to use */
286 u8 *tx_dma_buf[2]; /* TX flip buffers for DMA */
287 u8 tx_dma_used; /* Flip buffer usage toggler */
288 u16 txcount; /* Count of bytes to transmit */
289
290 void (*rx_function)(struct z8530_channel *, struct sk_buff *);
291
292 /*
293 * Sync DMA
294 */
295
296 u8 rxdma; /* DMA channels */
297 u8 txdma;
298 u8 rxdma_on; /* DMA active if flag set */
299 u8 txdma_on;
300 u8 dma_num; /* Buffer we are DMAing into */
301 u8 dma_ready; /* Is the other buffer free */
302 u8 dma_tx; /* TX is to use DMA */
303 u8 *rx_buf[2]; /* The flip buffers */
304
305 /*
306 * System
307 */
308
309 struct z8530_dev *dev; /* Z85230 chip instance we are from */
310 unsigned long ctrlio; /* I/O ports */
311 unsigned long dataio;
312
313 /*
314 * For PC we encode this way.
315 */
316#define Z8530_PORT_SLEEP 0x80000000
317#define Z8530_PORT_OF(x) ((x)&0xFFFF)
318
319 u32 rx_overrun; /* Overruns - not done yet */
320 u32 rx_crc_err;
321
322 /*
323 * Bound device pointers
324 */
325
326 void *private; /* For our owner */
327 struct net_device *netdevice; /* Network layer device */
328 struct net_device_stats stats; /* Network layer statistics */
329
330 /*
331 * Async features
332 */
333
334 struct tty_struct *tty; /* Attached terminal */
335 int line; /* Minor number */
336 wait_queue_head_t open_wait; /* Tasks waiting to open */
337 wait_queue_head_t close_wait; /* and for close to end */
338 unsigned long event; /* Pending events */
339 int fdcount; /* # of fd on device */
340 int blocked_open; /* # of blocked opens */
341	int			x_char;		/* XON/XOFF char */
342 unsigned char *xmit_buf; /* Transmit pointer */
343 int xmit_head; /* Transmit ring */
344 int xmit_tail;
345 int xmit_cnt;
346 int flags;
347 int timeout;
348 int xmit_fifo_size; /* Transmit FIFO info */
349
350 int close_delay; /* Do we wait for drain on close ? */
351 unsigned short closing_wait;
352
353 /* We need to know the current clock divisor
354 * to read the bps rate the chip has currently
355 * loaded.
356 */
357
358 unsigned char clk_divisor; /* May be 1, 16, 32, or 64 */
359 int zs_baud;
360
361 int magic;
362 int baud_base; /* Baud parameters */
363 int custom_divisor;
364
365
366 unsigned char tx_active; /* character is being xmitted */
367 unsigned char tx_stopped; /* output is suspended */
368
369	spinlock_t		*lock;	  /* Device lock */
370};
371
372/*
373 * Each Z853x0 device.
374 */
375
376struct z8530_dev
377{
378 char *name; /* Device instance name */
379 struct z8530_channel chanA; /* SCC channel A */
380 struct z8530_channel chanB; /* SCC channel B */
381 int type;
382#define Z8530 0 /* NMOS dinosaur */
383#define Z85C30 1 /* CMOS - better */
384#define Z85230 2 /* CMOS with real FIFO */
385 int irq; /* Interrupt for the device */
386 int active; /* Soft interrupt enable - the Mac doesn't
387 always have a hard disable on its 8530s... */
388 spinlock_t lock;
389};
390
391
392/*
393 * Functions
394 */
395
396extern u8 z8530_dead_port[];
397extern u8 z8530_hdlc_kilostream_85230[];
398extern u8 z8530_hdlc_kilostream[];
399extern irqreturn_t z8530_interrupt(int, void *, struct pt_regs *);
400extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
401extern int z8530_init(struct z8530_dev *);
402extern int z8530_shutdown(struct z8530_dev *);
403extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
404extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
405extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
406extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
407extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
408extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
409extern int z8530_channel_load(struct z8530_channel *, u8 *);
410extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
411extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c);
412extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
413
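
To show how these entry points fit together, here is a hedged sketch of the bring-up a board driver performs for one synchronous channel. The IRQ, I/O port layout, ISA DMA channel numbers and the choice of the kilostream register table are illustrative only; the in-tree users of this core are the authoritative references.

/* Illustrative bring-up only; a real driver fills in more channel fields. */
static int example_attach(struct z8530_dev *dev, struct net_device *netdev,
			  int irq, unsigned long iobase)
{
	if (request_irq(irq, z8530_interrupt, SA_INTERRUPT, "z85230", dev))
		return -EBUSY;

	dev->irq = irq;
	dev->chanA.ctrlio = iobase;		/* example port layout */
	dev->chanA.dataio = iobase + 1;
	dev->chanA.rxdma = 3;			/* example ISA DMA channels */
	dev->chanA.txdma = 1;
	dev->chanA.netdevice = netdev;

	if (z8530_init(dev))			/* probe and reset the SCC */
		goto fail;

	/* Program channel A for synchronous HDLC and open it in DMA mode. */
	z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
	if (z8530_sync_dma_open(netdev, &dev->chanA))
		goto fail;

	return 0;
fail:
	free_irq(irq, dev);
	return -ENODEV;
}
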
414
415/*
416 * Standard interrupt vector sets
417 */
418
419extern struct z8530_irqhandler z8530_sync, z8530_async, z8530_nop;
420
421/*
422 * Asynchronous Interfacing
423 */
424
425#define SERIAL_MAGIC 0x5301
426
427/*
428 * The size of the serial xmit buffer is 1 page, or 4096 bytes
429 */
430
431#define SERIAL_XMIT_SIZE 4096
432#define WAKEUP_CHARS 256
433
434/*
435 * Events are used to schedule things to happen at timer-interrupt
436 * time, instead of at rs interrupt time.
437 */
438#define RS_EVENT_WRITE_WAKEUP 0
439
440/* Internal flags used only by kernel/chr_drv/serial.c */
441#define ZILOG_INITIALIZED 0x80000000 /* Serial port was initialized */
442#define ZILOG_CALLOUT_ACTIVE 0x40000000 /* Call out device is active */
443#define ZILOG_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
444#define ZILOG_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
445#define ZILOG_CLOSING 0x08000000 /* Serial port is closing */
446#define ZILOG_CTS_FLOW 0x04000000 /* Do CTS flow control */
447#define ZILOG_CHECK_CD 0x02000000 /* i.e., CLOCAL */
448
449#endif /* !(_Z8530_H) */