path: root/drivers/net/irda
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/irda
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/irda')
-rw-r--r--  drivers/net/irda/Kconfig | 404
-rw-r--r--  drivers/net/irda/Makefile | 47
-rw-r--r--  drivers/net/irda/act200l-sir.c | 257
-rw-r--r--  drivers/net/irda/act200l.c | 297
-rw-r--r--  drivers/net/irda/actisys-sir.c | 246
-rw-r--r--  drivers/net/irda/actisys.c | 288
-rw-r--r--  drivers/net/irda/ali-ircc.c | 2277
-rw-r--r--  drivers/net/irda/ali-ircc.h | 231
-rw-r--r--  drivers/net/irda/au1000_ircc.h | 127
-rw-r--r--  drivers/net/irda/au1k_ir.c | 851
-rw-r--r--  drivers/net/irda/donauboe.c | 1789
-rw-r--r--  drivers/net/irda/donauboe.h | 363
-rw-r--r--  drivers/net/irda/ep7211_ir.c | 122
-rw-r--r--  drivers/net/irda/esi-sir.c | 159
-rw-r--r--  drivers/net/irda/esi.c | 149
-rw-r--r--  drivers/net/irda/girbil-sir.c | 258
-rw-r--r--  drivers/net/irda/girbil.c | 250
-rw-r--r--  drivers/net/irda/irda-usb.c | 1602
-rw-r--r--  drivers/net/irda/irda-usb.h | 163
-rw-r--r--  drivers/net/irda/irport.c | 1146
-rw-r--r--  drivers/net/irda/irport.h | 80
-rw-r--r--  drivers/net/irda/irtty-sir.c | 642
-rw-r--r--  drivers/net/irda/irtty-sir.h | 34
-rw-r--r--  drivers/net/irda/litelink-sir.c | 209
-rw-r--r--  drivers/net/irda/litelink.c | 179
-rw-r--r--  drivers/net/irda/ma600-sir.c | 264
-rw-r--r--  drivers/net/irda/ma600.c | 354
-rw-r--r--  drivers/net/irda/mcp2120-sir.c | 230
-rw-r--r--  drivers/net/irda/mcp2120.c | 240
-rw-r--r--  drivers/net/irda/nsc-ircc.c | 2222
-rw-r--r--  drivers/net/irda/nsc-ircc.h | 280
-rw-r--r--  drivers/net/irda/old_belkin-sir.c | 156
-rw-r--r--  drivers/net/irda/old_belkin.c | 164
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 1045
-rw-r--r--  drivers/net/irda/sir-dev.h | 202
-rw-r--r--  drivers/net/irda/sir_core.c | 56
-rw-r--r--  drivers/net/irda/sir_dev.c | 677
-rw-r--r--  drivers/net/irda/sir_dongle.c | 134
-rw-r--r--  drivers/net/irda/sir_kthread.c | 502
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 2396
-rw-r--r--  drivers/net/irda/smsc-ircc2.h | 194
-rw-r--r--  drivers/net/irda/smsc-sio.h | 100
-rw-r--r--  drivers/net/irda/stir4200.c | 1184
-rw-r--r--  drivers/net/irda/tekram-sir.c | 232
-rw-r--r--  drivers/net/irda/tekram.c | 282
-rw-r--r--  drivers/net/irda/via-ircc.c | 1676
-rw-r--r--  drivers/net/irda/via-ircc.h | 853
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 1912
-rw-r--r--  drivers/net/irda/vlsi_ir.h | 798
-rw-r--r--  drivers/net/irda/w83977af.h | 53
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 1379
-rw-r--r--  drivers/net/irda/w83977af_ir.h | 199
52 files changed, 29954 insertions, 0 deletions
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
new file mode 100644
index 000000000000..a464841da49f
--- /dev/null
+++ b/drivers/net/irda/Kconfig
@@ -0,0 +1,404 @@
1
2menu "Infrared-port device drivers"
3 depends on IRDA!=n
4
5comment "SIR device drivers"
6
7config IRTTY_SIR
8 tristate "IrTTY (uses Linux serial driver)"
9 depends on IRDA
10 help
11 Say Y here if you want to build support for the IrTTY line
12 discipline. To compile it as a module, choose M here: the module
13 will be called irtty-sir. IrTTY makes it possible to use Linux's
14 own serial driver for all IrDA ports that are 16550 compatible.
15 Most IrDA chips are 16550 compatible so you should probably say Y
16 to this option. Using IrTTY will however limit the speed of the
17 connection to 115200 bps (IrDA SIR mode).
18
19 If unsure, say Y.
20
21comment "Dongle support"
22
23config DONGLE
24 bool "Serial dongle support"
25 depends on IRTTY_SIR
26 help
27 Say Y here if you have an infrared device that connects to your
28 computer's serial port. These devices are called dongles. Then say Y
29 or M to the driver for your particular dongle below.
30
31 Note that the answer to this question won't directly affect the
32 kernel: saying N will just cause the configurator to skip all
33 the questions about serial dongles.
34
35config ESI_DONGLE
36 tristate "ESI JetEye PC dongle"
37 depends on DONGLE && IRDA
38 help
39 Say Y here if you want to build support for the Extended Systems
40 JetEye PC dongle. To compile it as a module, choose M here. The ESI
41 dongle attaches to the normal 9-pin serial port connector, and can
42 currently only be used by IrTTY. To activate support for ESI
43 dongles you will have to start irattach like this:
44 "irattach -d esi".
45
46config ACTISYS_DONGLE
47 tristate "ACTiSYS IR-220L and IR220L+ dongle"
48 depends on DONGLE && IRDA
49 help
50 Say Y here if you want to build support for the ACTiSYS IR-220L and
51 IR220L+ dongles. To compile it as a module, choose M here. The
52 ACTiSYS dongles attach to the normal 9-pin serial port connector,
53 and can currently only be used by IrTTY. To activate support for
54 ACTiSYS dongles you will have to start irattach like this:
55 "irattach -d actisys" or "irattach -d actisys+".
56
57config TEKRAM_DONGLE
58 tristate "Tekram IrMate 210B dongle"
59 depends on DONGLE && IRDA
60 help
61 Say Y here if you want to build support for the Tekram IrMate 210B
62 dongle. To compile it as a module, choose M here. The Tekram dongle
63 attaches to the normal 9-pin serial port connector, and can
64 currently only be used by IrTTY. To activate support for Tekram
65 dongles you will have to start irattach like this:
66 "irattach -d tekram".
67
68config LITELINK_DONGLE
69 tristate "Parallax LiteLink dongle"
70 depends on DONGLE && IRDA
71 help
72 Say Y here if you want to build support for the Parallax Litelink
73 dongle. To compile it as a module, choose M here. The Parallax
74 dongle attaches to the normal 9-pin serial port connector, and can
75 currently only be used by IrTTY. To activate support for Parallax
76 dongles you will have to start irattach like this:
77 "irattach -d litelink".
78
79config MA600_DONGLE
80 tristate "Mobile Action MA600 dongle"
81 depends on DONGLE && IRDA && EXPERIMENTAL
82 help
83 Say Y here if you want to build support for the Mobile Action MA600
84 dongle. To compile it as a module, choose M here. The MA600 dongle
85 attaches to the normal 9-pin serial port connector, and can
86 currently only be used by IrTTY. The driver should also support
87 the MA620 USB version of the dongle, if the integrated USB-to-RS232
88 converter is supported by usbserial. To activate support for
89 MA600 dongle you will have to start irattach like this:
90 "irattach -d ma600".
91
92config GIRBIL_DONGLE
93 tristate "Greenwich GIrBIL dongle"
94 depends on DONGLE && IRDA && EXPERIMENTAL
95 help
96 Say Y here if you want to build support for the Greenwich GIrBIL
97 dongle. If you want to compile it as a module, choose M here.
98 The Greenwich dongle attaches to the normal 9-pin serial port
99 connector, and can currently only be used by IrTTY. To activate
100 support for Greenwich dongles you will have to start irattach
101 like this: "irattach -d girbil".
102
103config MCP2120_DONGLE
104 tristate "Microchip MCP2120"
105 depends on DONGLE && IRDA && EXPERIMENTAL
106 help
107 Say Y here if you want to build support for the Microchip MCP2120
108 dongle. If you want to compile it as a module, choose M here.
109 The MCP2120 dongle attaches to the normal 9-pin serial port
110 connector, and can currently only be used by IrTTY. To activate
111 support for MCP2120 dongles you will have to start irattach
112 like this: "irattach -d mcp2120".
113
114 You must build this dongle yourself. For more information see:
115 <http://www.eyetap.org/~tangf/irda_sir_linux.html>
116
117config OLD_BELKIN_DONGLE
118 tristate "Old Belkin dongle"
119 depends on DONGLE && IRDA && EXPERIMENTAL
120 help
121 Say Y here if you want to build support for the Adaptec Airport 1000
122 and 2000 dongles. If you want to compile it as a module, choose
123 M here. Some information is contained in the comments
124 at the top of <file:drivers/net/irda/old_belkin.c>.
125
126config ACT200L_DONGLE
127 tristate "ACTiSYS IR-200L dongle"
128 depends on DONGLE && IRDA && EXPERIMENTAL
129 help
130 Say Y here if you want to build support for the ACTiSYS IR-200L
131 dongle. If you want to compile it as a module, choose M here.
132 The ACTiSYS IR-200L dongle attaches to the normal 9-pin serial
133 port connector, and can currently only be used by IrTTY.
134 To activate support for ACTiSYS IR-200L dongle you will have to
135 start irattach like this: "irattach -d act200l".
136
137comment "Old SIR device drivers"
138
139config IRPORT_SIR
140 tristate "IrPORT (IrDA serial driver)"
141 depends on IRDA && BROKEN_ON_SMP
142 ---help---
143 Say Y here if you want to build support for the IrPORT IrDA device
144 driver. To compile it as a module, choose M here: the module will be
145 called irport. IrPORT can be used instead of IrTTY and sometimes
146 this can be better. One example is if your IrDA port does not
147 have echo-canceling; it will still work OK with IrPORT since this
148 driver works in half-duplex mode only. You don't need to use
149 irattach with IrPORT; you just insert it the same way as FIR
150 drivers (insmod irport io=0x3e8 irq=11). Notice that IrPORT is a
151 SIR device driver which means that speed is limited to 115200 bps.
152
153 If unsure, say Y.
154
155comment "Old Serial dongle support"
156
157config DONGLE_OLD
158 bool "Old Serial dongle support"
159 depends on (IRTTY_OLD || IRPORT_SIR) && BROKEN_ON_SMP
160 help
161 Say Y here if you have an infrared device that connects to your
162 computer's serial port. These devices are called dongles. Then say Y
163 or M to the driver for your particular dongle below.
164
165 Note that the answer to this question won't directly affect the
166 kernel: saying N will just cause the configurator to skip all
167 the questions about serial dongles.
168
169config ESI_DONGLE_OLD
170 tristate "ESI JetEye PC dongle"
171 depends on DONGLE_OLD && IRDA
172 help
173 Say Y here if you want to build support for the Extended Systems
174 JetEye PC dongle. To compile it as a module, choose M here. The ESI
175 dongle attaches to the normal 9-pin serial port connector, and can
176 currently only be used by IrTTY. To activate support for ESI
177 dongles you will have to start irattach like this:
178 "irattach -d esi".
179
180config ACTISYS_DONGLE_OLD
181 tristate "ACTiSYS IR-220L and IR220L+ dongle"
182 depends on DONGLE_OLD && IRDA
183 help
184 Say Y here if you want to build support for the ACTiSYS IR-220L and
185 IR220L+ dongles. To compile it as a module, choose M here. The
186 ACTiSYS dongles attach to the normal 9-pin serial port connector,
187 and can currently only be used by IrTTY. To activate support for
188 ACTiSYS dongles you will have to start irattach like this:
189 "irattach -d actisys" or "irattach -d actisys+".
190
191config TEKRAM_DONGLE_OLD
192 tristate "Tekram IrMate 210B dongle"
193 depends on DONGLE_OLD && IRDA
194 help
195 Say Y here if you want to build support for the Tekram IrMate 210B
196 dongle. To compile it as a module, choose M here. The Tekram dongle
197 attaches to the normal 9-pin serial port connector, and can
198 currently only be used by IrTTY. To activate support for Tekram
199 dongles you will have to start irattach like this:
200 "irattach -d tekram".
201
202config GIRBIL_DONGLE_OLD
203 tristate "Greenwich GIrBIL dongle"
204 depends on DONGLE_OLD && IRDA
205 help
206 Say Y here if you want to build support for the Greenwich GIrBIL
207 dongle. To compile it as a module, choose M here. The Greenwich
208 dongle attaches to the normal 9-pin serial port connector, and can
209 currently only be used by IrTTY. To activate support for Greenwich
210 dongles you will have to insert "irattach -d girbil" in the
211 /etc/irda/drivers script.
212
213config LITELINK_DONGLE_OLD
214 tristate "Parallax LiteLink dongle"
215 depends on DONGLE_OLD && IRDA
216 help
217 Say Y here if you want to build support for the Parallax Litelink
218 dongle. To compile it as a module, choose M here. The Parallax
219 dongle attaches to the normal 9-pin serial port connector, and can
220 currently only be used by IrTTY. To activate support for Parallax
221 dongles you will have to start irattach like this:
222 "irattach -d litelink".
223
224config MCP2120_DONGLE_OLD
225 tristate "Microchip MCP2120"
226 depends on DONGLE_OLD && IRDA
227 help
228 Say Y here if you want to build support for the Microchip MCP2120
229 dongle. To compile it as a module, choose M here. The MCP2120 dongle
230 attaches to the normal 9-pin serial port connector, and can
231 currently only be used by IrTTY. To activate support for MCP2120
232 dongles you will have to insert "irattach -d mcp2120" in the
233 /etc/irda/drivers script.
234
235 You must build this dongle yourself. For more information see:
236 <http://www.eyetap.org/~tangf/irda_sir_linux.html>
237
238config OLD_BELKIN_DONGLE_OLD
239 tristate "Old Belkin dongle"
240 depends on DONGLE_OLD && IRDA
241 help
242 Say Y here if you want to build support for the Adaptec Airport 1000
243 and 2000 dongles. To compile it as a module, choose M here: the module
244 will be called old_belkin. Some information is contained in the
245 comments at the top of <file:drivers/net/irda/old_belkin.c>.
246
247config ACT200L_DONGLE_OLD
248 tristate "ACTiSYS IR-200L dongle (EXPERIMENTAL)"
249 depends on DONGLE_OLD && EXPERIMENTAL && IRDA
250 help
251 Say Y here if you want to build support for the ACTiSYS IR-200L
252 dongle. To compile it as a module, choose M here. The ACTiSYS
253 IR-200L dongle attaches to the normal 9-pin serial port connector,
254 and can currently only be used by IrTTY. To activate support for
255 ACTiSYS IR-200L dongles you will have to start irattach like this:
256 "irattach -d act200l".
257
258config MA600_DONGLE_OLD
259 tristate "Mobile Action MA600 dongle (EXPERIMENTAL)"
260 depends on DONGLE_OLD && EXPERIMENTAL && IRDA
261 ---help---
262 Say Y here if you want to build support for the Mobile Action MA600
263 dongle. To compile it as a module, choose M here. The MA600 dongle
264 attaches to the normal 9-pin serial port connector, and can
265 currently only be tested on IrCOMM. To activate support for MA600
266 dongles you will have to insert "irattach -d ma600" in the
267 /etc/irda/drivers script. Note: irutils 0.9.15 requires no
268 modification. irutils 0.9.9 needs modification. For more
269 information, download the following tar gzip file.
270
271 There is a pre-compiled module on
272 <http://engsvr.ust.hk/~eetwl95/ma600.html>
273
274config EP7211_IR
275 tristate "EP7211 I/R support"
276 depends on DONGLE_OLD && ARCH_EP7211 && IRDA
277
278comment "FIR device drivers"
279
280config USB_IRDA
281 tristate "IrDA USB dongles"
282 depends on IRDA && USB
283 ---help---
284 Say Y here if you want to build support for the USB IrDA FIR Dongle
285 device driver. To compile it as a module, choose M here: the module
286 will be called irda-usb. IrDA-USB supports the various IrDA USB
287 dongles available and most of their peculiarities. Those dongles
288 plug into the USB port of your computer, are plug and play, and
289 support SIR and FIR (4Mbps) speeds. On the other hand, those
290 dongles tend to be less efficient than a FIR chipset.
291
292 Please note that the driver is still experimental. And of course,
293 you will need both USB and IrDA support in your kernel...
294
295config SIGMATEL_FIR
296 tristate "SigmaTel STIr4200 bridge (EXPERIMENTAL)"
297 depends on IRDA && USB && EXPERIMENTAL
298 select CRC32
299 ---help---
300 Say Y here if you want to build support for the SigmaTel STIr4200
301 USB IrDA FIR bridge device driver.
302
303 USB bridges based on the SigmaTel STIr4200 don't conform to the
304 IrDA-USB device class specification, and therefore need their
305 own specific driver. Those dongles support SIR and FIR (4Mbps)
306 speeds.
307
308 To compile it as a module, choose M here: the module will be called
309 stir4200.
310
311config NSC_FIR
312 tristate "NSC PC87108/PC87338"
313 depends on IRDA && ISA
314 help
315 Say Y here if you want to build support for the NSC PC87108 and
316 PC87338 IrDA chipsets. This driver supports SIR,
317 MIR and FIR (4Mbps) speeds.
318
319 To compile it as a module, choose M here: the module will be called
320 nsc-ircc.
321
322config WINBOND_FIR
323 tristate "Winbond W83977AF (IR)"
324 depends on IRDA && ISA
325 help
326 Say Y here if you want to build IrDA support for the Winbond
327 W83977AF super-io chipset. This driver should be used for the IrDA
328 chipset in the Corel NetWinder. The driver supports SIR, MIR and
329 FIR (4Mbps) speeds.
330
331 To compile it as a module, choose M here: the module will be called
332 w83977af_ir.
333
334config TOSHIBA_FIR
335 tristate "Toshiba Type-O IR Port"
336 depends on IRDA && PCI && !64BIT
337 help
338 Say Y here if you want to build support for the Toshiba Type-O IR
339 and Donau oboe chipsets. These chipsets are used by the Toshiba
340 Libretto 100/110CT, Tecra 8100, Portege 7020 and many more laptops.
341 To compile it as a module, choose M here: the module will be called
342 donauboe.
343
344config AU1000_FIR
345 tristate "Alchemy Au1000 SIR/FIR"
346 depends on MIPS_AU1000 && IRDA
347
348config SMC_IRCC_FIR
349 tristate "SMSC IrCC (EXPERIMENTAL)"
350 depends on EXPERIMENTAL && IRDA && ISA
351 help
352 Say Y here if you want to build support for the SMC Infrared
353 Communications Controller. It is used in a wide variety of
354 laptops (Fujitsu, Sony, Compaq and some Toshiba).
355 To compile it as a module, choose M here: the module will be called
356 smsc-ircc2.
357
358config ALI_FIR
359 tristate "ALi M5123 FIR (EXPERIMENTAL)"
360 depends on EXPERIMENTAL && IRDA && ISA
361 help
362 Say Y here if you want to build support for the ALi M5123 FIR
363 Controller. The ALi M5123 FIR Controller is embedded in ALi M1543C,
364 M1535, M1535D, M1535+, M1535D South Bridge. This driver supports
365 SIR, MIR and FIR (4Mbps) speeds.
366
367 To compile it as a module, choose M here: the module will be called
368 ali-ircc.
369
370config VLSI_FIR
371 tristate "VLSI 82C147 SIR/MIR/FIR (EXPERIMENTAL)"
372 depends on EXPERIMENTAL && IRDA && PCI
373 help
374 Say Y here if you want to build support for the VLSI 82C147
375 PCI-IrDA Controller. This controller is used by the HP OmniBook 800
376 and 5500 notebooks. The driver provides support for SIR, MIR and
377 FIR (4Mbps) speeds.
378
379 To compile it as a module, choose M here: the module will be called
380 vlsi_ir.
381
382config SA1100_FIR
383 tristate "SA1100 Internal IR"
384 depends on ARCH_SA1100 && IRDA
385
386config VIA_FIR
387 tristate "VIA VT8231/VT1211 SIR/MIR/FIR"
388 depends on IRDA && ISA && PCI
389 help
390 Say Y here if you want to build support for the VIA VT8231
391 and VIA VT1211 IrDA controllers, found on the motherboards using
392 those VIA chipsets. To use this controller, you will need
393 to plug a specific 5-pin FIR IrDA dongle into the specific
394 motherboard connector. The driver provides support for SIR, MIR
395 and FIR (4Mbps) speeds.
396
397 You will need to specify the 'dongle_id' module parameter to
398 indicate the FIR dongle attached to the controller.
399
400 To compile it as a module, choose M here: the module will be called
401 via-ircc.
402
403endmenu
404
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
new file mode 100644
index 000000000000..29a8bd812b21
--- /dev/null
+++ b/drivers/net/irda/Makefile
@@ -0,0 +1,47 @@
1#
2# Makefile for the Linux IrDA infrared port device drivers.
3#
4# 9 Aug 2000, Christoph Hellwig <hch@infradead.org>
5# Rewritten to use lists instead of if-statements.
6#
7
8# Old SIR drivers
9obj-$(CONFIG_IRPORT_SIR) += irport.o
10# FIR drivers
11obj-$(CONFIG_USB_IRDA) += irda-usb.o
12obj-$(CONFIG_SIGMATEL_FIR) += stir4200.o
13obj-$(CONFIG_NSC_FIR) += nsc-ircc.o
14obj-$(CONFIG_WINBOND_FIR) += w83977af_ir.o
15obj-$(CONFIG_SA1100_FIR) += sa1100_ir.o
16obj-$(CONFIG_TOSHIBA_FIR) += donauboe.o
17obj-$(CONFIG_SMC_IRCC_FIR) += smsc-ircc2.o
18obj-$(CONFIG_ALI_FIR) += ali-ircc.o
19obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
20obj-$(CONFIG_VIA_FIR) += via-ircc.o
21# Old dongle drivers for old SIR drivers
22obj-$(CONFIG_ESI_DONGLE_OLD) += esi.o
23obj-$(CONFIG_TEKRAM_DONGLE_OLD) += tekram.o
24obj-$(CONFIG_ACTISYS_DONGLE_OLD) += actisys.o
25obj-$(CONFIG_GIRBIL_DONGLE_OLD) += girbil.o
26obj-$(CONFIG_LITELINK_DONGLE_OLD) += litelink.o
27obj-$(CONFIG_OLD_BELKIN_DONGLE_OLD) += old_belkin.o
28obj-$(CONFIG_MCP2120_DONGLE_OLD) += mcp2120.o
29obj-$(CONFIG_ACT200L_DONGLE_OLD) += act200l.o
30obj-$(CONFIG_MA600_DONGLE_OLD) += ma600.o
31obj-$(CONFIG_EP7211_IR) += ep7211_ir.o
32obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
33# New SIR drivers
34obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
35# New dongle drivers for new SIR drivers
36obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
37obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
38obj-$(CONFIG_ACTISYS_DONGLE) += actisys-sir.o
39obj-$(CONFIG_LITELINK_DONGLE) += litelink-sir.o
40obj-$(CONFIG_GIRBIL_DONGLE) += girbil-sir.o
41obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin-sir.o
42obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
43obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
44obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
45
46# The SIR helper module
47sir-dev-objs := sir_core.o sir_dev.o sir_dongle.o sir_kthread.o
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
new file mode 100644
index 000000000000..d8b89c74aabd
--- /dev/null
+++ b/drivers/net/irda/act200l-sir.c
@@ -0,0 +1,257 @@
1/*********************************************************************
2 *
3 * Filename: act200l.c
4 * Version: 0.8
5 * Description: Implementation for the ACTiSYS ACT-IR200L dongle
6 * Status: Experimental.
7 * Author: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
8 * Created at: Fri Aug 3 17:35:42 2001
9 * Modified at: Fri Aug 17 10:22:40 2001
10 * Modified by: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
11 *
12 * Copyright (c) 2001 SHIMIZU Takuya, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 ********************************************************************/
20
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24
25#include <net/irda/irda.h>
26
27#include "sir-dev.h"
28
29static int act200l_reset(struct sir_dev *dev);
30static int act200l_open(struct sir_dev *dev);
31static int act200l_close(struct sir_dev *dev);
32static int act200l_change_speed(struct sir_dev *dev, unsigned speed);
33
34/* Register 0: Control register #1 */
35#define ACT200L_REG0 0x00
36#define ACT200L_TXEN 0x01 /* Enable transmitter */
37#define ACT200L_RXEN 0x02 /* Enable receiver */
38
39/* Register 1: Control register #2 */
40#define ACT200L_REG1 0x10
41#define ACT200L_LODB 0x01 /* Load new baud rate count value */
42#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
43
44/* Register 4: Output Power register */
45#define ACT200L_REG4 0x40
46#define ACT200L_OP0 0x01 /* Enable LED1C output */
47#define ACT200L_OP1 0x02 /* Enable LED2C output */
48#define ACT200L_BLKR 0x04
49
50/* Register 5: Receive Mode register */
51#define ACT200L_REG5 0x50
52#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
53
54/* Register 6: Receive Sensitivity register #1 */
55#define ACT200L_REG6 0x60
56#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
57#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
58
59/* Register 7: Receive Sensitivity register #2 */
60#define ACT200L_REG7 0x70
61#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
62
63/* Register 8,9: Baud Rate Divider register #1,#2 */
64#define ACT200L_REG8 0x80
65#define ACT200L_REG9 0x90
66
67#define ACT200L_2400 0x5f
68#define ACT200L_9600 0x17
69#define ACT200L_19200 0x0b
70#define ACT200L_38400 0x05
71#define ACT200L_57600 0x03
72#define ACT200L_115200 0x01
73
74/* Register 13: Control register #3 */
75#define ACT200L_REG13 0xd0
76#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
77
78/* Register 15: Status register */
79#define ACT200L_REG15 0xf0
80
81/* Register 21: Control register #4 */
82#define ACT200L_REG21 0x50
83#define ACT200L_EXCK 0x02 /* Disable clock output driver */
84#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
85
86static struct dongle_driver act200l = {
87 .owner = THIS_MODULE,
88 .driver_name = "ACTiSYS ACT-IR200L",
89 .type = IRDA_ACT200L_DONGLE,
90 .open = act200l_open,
91 .close = act200l_close,
92 .reset = act200l_reset,
93 .set_speed = act200l_change_speed,
94};
95
96static int __init act200l_sir_init(void)
97{
98 return irda_register_dongle(&act200l);
99}
100
101static void __exit act200l_sir_cleanup(void)
102{
103 irda_unregister_dongle(&act200l);
104}
105
106static int act200l_open(struct sir_dev *dev)
107{
108 struct qos_info *qos = &dev->qos;
109
110 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
111
112 /* Power on the dongle */
113 sirdev_set_dtr_rts(dev, TRUE, TRUE);
114
115 /* Set the speeds we can accept */
116 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
117 qos->min_turn_time.bits = 0x03;
118 irda_qos_bits_to_value(qos);
119
120 /* irda thread waits 50 msec for power settling */
121
122 return 0;
123}
124
125static int act200l_close(struct sir_dev *dev)
126{
127 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
128
129 /* Power off the dongle */
130 sirdev_set_dtr_rts(dev, FALSE, FALSE);
131
132 return 0;
133}
134
135/*
136 * Function act200l_change_speed (dev, speed)
137 *
138 * Set the speed for the ACTiSYS ACT-IR200L type dongle.
139 *
140 */
141static int act200l_change_speed(struct sir_dev *dev, unsigned speed)
142{
143 u8 control[3];
144 int ret = 0;
145
146 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
147
148 /* Clear DTR and set RTS to enter command mode */
149 sirdev_set_dtr_rts(dev, FALSE, TRUE);
150
151 switch (speed) {
152 default:
153 ret = -EINVAL;
154 /* fall through */
155 case 9600:
156 control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f);
157 control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f);
158 break;
159 case 19200:
160 control[0] = ACT200L_REG8 | (ACT200L_19200 & 0x0f);
161 control[1] = ACT200L_REG9 | ((ACT200L_19200 >> 4) & 0x0f);
162 break;
163 case 38400:
164 control[0] = ACT200L_REG8 | (ACT200L_38400 & 0x0f);
165 control[1] = ACT200L_REG9 | ((ACT200L_38400 >> 4) & 0x0f);
166 break;
167 case 57600:
168 control[0] = ACT200L_REG8 | (ACT200L_57600 & 0x0f);
169 control[1] = ACT200L_REG9 | ((ACT200L_57600 >> 4) & 0x0f);
170 break;
171 case 115200:
172 control[0] = ACT200L_REG8 | (ACT200L_115200 & 0x0f);
173 control[1] = ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f);
174 break;
175 }
176 control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE;
177
178 /* Write control bytes */
179 sirdev_raw_write(dev, control, 3);
180 msleep(5);
181
182 /* Go back to normal mode */
183 sirdev_set_dtr_rts(dev, TRUE, TRUE);
184
185 dev->speed = speed;
186 return ret;
187}
188
189/*
190 * Function act200l_reset (driver)
191 *
192 * Reset the ACTiSYS ACT-IR200L type dongle.
193 */
194
195#define ACT200L_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
196#define ACT200L_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
197
198static int act200l_reset(struct sir_dev *dev)
199{
200 unsigned state = dev->fsm.substate;
201 unsigned delay = 0;
202 u8 control[9] = {
203 ACT200L_REG15,
204 ACT200L_REG13 | ACT200L_SHDW,
205 ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
206 ACT200L_REG13,
207 ACT200L_REG7 | ACT200L_ENPOS,
208 ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
209 ACT200L_REG5 | ACT200L_RWIDL,
210 ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
211 ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN
212 };
213 int ret = 0;
214
215 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
216
217 switch (state) {
218 case SIRDEV_STATE_DONGLE_RESET:
219 /* Reset the dongle : set RTS low for 25 ms */
220 sirdev_set_dtr_rts(dev, TRUE, FALSE);
221 state = ACT200L_STATE_WAIT1_RESET;
222 delay = 50;
223 break;
224
225 case ACT200L_STATE_WAIT1_RESET:
226 /* Clear DTR and set RTS to enter command mode */
227 sirdev_set_dtr_rts(dev, FALSE, TRUE);
228
229 udelay(25); /* better wait for some short while */
230
231 /* Write control bytes */
232 sirdev_raw_write(dev, control, sizeof(control));
233 state = ACT200L_STATE_WAIT2_RESET;
234 delay = 15;
235 break;
236
237 case ACT200L_STATE_WAIT2_RESET:
238 /* Go back to normal mode */
239 sirdev_set_dtr_rts(dev, TRUE, TRUE);
240 dev->speed = 9600;
241 break;
242 default:
243 IRDA_ERROR("%s(), unknown state %d\n", __FUNCTION__, state);
244 ret = -1;
245 break;
246 }
247 dev->fsm.substate = state;
248 return (delay > 0) ? delay : ret;
249}
250
251MODULE_AUTHOR("SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>");
252MODULE_DESCRIPTION("ACTiSYS ACT-IR200L dongle driver");
253MODULE_LICENSE("GPL");
254MODULE_ALIAS("irda-dongle-10"); /* IRDA_ACT200L_DONGLE */
255
256module_init(act200l_sir_init);
257module_exit(act200l_sir_cleanup);
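
The switch in act200l_change_speed() above programs the dongle's baud-rate divider by splitting one of the ACT200L_* divisor constants across the 4-bit fields of registers 8 and 9. Below is a minimal userspace sketch of that packing, reusing the constants from the file above; the pack_divisor() helper and the printf harness are illustrative only and not part of the driver.

/*
 * Userspace sketch (not kernel code): pack an ACT200L baud-rate divisor
 * into the two control bytes written to REG8/REG9, mirroring what the
 * switch statement in act200l_change_speed() does for each speed.
 */
#include <stdio.h>

#define ACT200L_REG8   0x80
#define ACT200L_REG9   0x90
#define ACT200L_9600   0x17
#define ACT200L_115200 0x01

static void pack_divisor(unsigned char divisor, unsigned char control[2])
{
	control[0] = ACT200L_REG8 | (divisor & 0x0f);        /* low nibble  */
	control[1] = ACT200L_REG9 | ((divisor >> 4) & 0x0f); /* high nibble */
}

int main(void)
{
	unsigned char control[2];

	pack_divisor(ACT200L_9600, control);
	printf("9600 bps:   REG8 byte 0x%02x, REG9 byte 0x%02x\n", control[0], control[1]);

	pack_divisor(ACT200L_115200, control);
	printf("115200 bps: REG8 byte 0x%02x, REG9 byte 0x%02x\n", control[0], control[1]);
	return 0;
}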
diff --git a/drivers/net/irda/act200l.c b/drivers/net/irda/act200l.c
new file mode 100644
index 000000000000..756cd44e857a
--- /dev/null
+++ b/drivers/net/irda/act200l.c
@@ -0,0 +1,297 @@
1/*********************************************************************
2 *
3 * Filename: act200l.c
4 * Version: 0.8
5 * Description: Implementation for the ACTiSYS ACT-IR200L dongle
6 * Status: Experimental.
7 * Author: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
8 * Created at: Fri Aug 3 17:35:42 2001
9 * Modified at: Fri Aug 17 10:22:40 2001
10 * Modified by: SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>
11 *
12 * Copyright (c) 2001 SHIMIZU Takuya, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 ********************************************************************/
20
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/tty.h>
24#include <linux/init.h>
25
26#include <net/irda/irda.h>
27#include <net/irda/irda_device.h>
28
29static int act200l_reset(struct irda_task *task);
30static void act200l_open(dongle_t *self, struct qos_info *qos);
31static void act200l_close(dongle_t *self);
32static int act200l_change_speed(struct irda_task *task);
33
34/* Register 0: Control register #1 */
35#define ACT200L_REG0 0x00
36#define ACT200L_TXEN 0x01 /* Enable transmitter */
37#define ACT200L_RXEN 0x02 /* Enable receiver */
38
39/* Register 1: Control register #2 */
40#define ACT200L_REG1 0x10
41#define ACT200L_LODB 0x01 /* Load new baud rate count value */
42#define ACT200L_WIDE 0x04 /* Expand the maximum allowable pulse */
43
44/* Register 4: Output Power register */
45#define ACT200L_REG4 0x40
46#define ACT200L_OP0 0x01 /* Enable LED1C output */
47#define ACT200L_OP1 0x02 /* Enable LED2C output */
48#define ACT200L_BLKR 0x04
49
50/* Register 5: Receive Mode register */
51#define ACT200L_REG5 0x50
52#define ACT200L_RWIDL 0x01 /* fixed 1.6us pulse mode */
53
54/* Register 6: Receive Sensitivity register #1 */
55#define ACT200L_REG6 0x60
56#define ACT200L_RS0 0x01 /* receive threshold bit 0 */
57#define ACT200L_RS1 0x02 /* receive threshold bit 1 */
58
59/* Register 7: Receive Sensitivity register #2 */
60#define ACT200L_REG7 0x70
61#define ACT200L_ENPOS 0x04 /* Ignore the falling edge */
62
63/* Register 8,9: Baud Rate Divider register #1,#2 */
64#define ACT200L_REG8 0x80
65#define ACT200L_REG9 0x90
66
67#define ACT200L_2400 0x5f
68#define ACT200L_9600 0x17
69#define ACT200L_19200 0x0b
70#define ACT200L_38400 0x05
71#define ACT200L_57600 0x03
72#define ACT200L_115200 0x01
73
74/* Register 13: Control register #3 */
75#define ACT200L_REG13 0xd0
76#define ACT200L_SHDW 0x01 /* Enable access to shadow registers */
77
78/* Register 15: Status register */
79#define ACT200L_REG15 0xf0
80
81/* Register 21: Control register #4 */
82#define ACT200L_REG21 0x50
83#define ACT200L_EXCK 0x02 /* Disable clock output driver */
84#define ACT200L_OSCL 0x04 /* oscillator in low power, medium accuracy mode */
85
86static struct dongle_reg dongle = {
87 .type = IRDA_ACT200L_DONGLE,
88 .open = act200l_open,
89 .close = act200l_close,
90 .reset = act200l_reset,
91 .change_speed = act200l_change_speed,
92 .owner = THIS_MODULE,
93};
94
95static int __init act200l_init(void)
96{
97 return irda_device_register_dongle(&dongle);
98}
99
100static void __exit act200l_cleanup(void)
101{
102 irda_device_unregister_dongle(&dongle);
103}
104
105static void act200l_open(dongle_t *self, struct qos_info *qos)
106{
107 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
108
109 /* Power on the dongle */
110 self->set_dtr_rts(self->dev, TRUE, TRUE);
111
112 /* Set the speeds we can accept */
113 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
114 qos->min_turn_time.bits = 0x03;
115}
116
117static void act200l_close(dongle_t *self)
118{
119 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
120
121 /* Power off the dongle */
122 self->set_dtr_rts(self->dev, FALSE, FALSE);
123}
124
125/*
126 * Function act200l_change_speed (dev, speed)
127 *
128 * Set the speed for the ACTiSYS ACT-IR200L type dongle.
129 *
130 */
131static int act200l_change_speed(struct irda_task *task)
132{
133 dongle_t *self = (dongle_t *) task->instance;
134 __u32 speed = (__u32) task->param;
135 __u8 control[3];
136 int ret = 0;
137
138 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
139
140 self->speed_task = task;
141
142 switch (task->state) {
143 case IRDA_TASK_INIT:
144 if (irda_task_execute(self, act200l_reset, NULL, task,
145 (void *) speed))
146 {
147 /* Dongle need more time to reset */
148 irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
149
150 /* Give reset 1 sec to finish */
151 ret = msecs_to_jiffies(1000);
152 }
153 break;
154 case IRDA_TASK_CHILD_WAIT:
155 IRDA_WARNING("%s(), resetting dongle timed out!\n",
156 __FUNCTION__);
157 ret = -1;
158 break;
159 case IRDA_TASK_CHILD_DONE:
160 /* Clear DTR and set RTS to enter command mode */
161 self->set_dtr_rts(self->dev, FALSE, TRUE);
162
163 switch (speed) {
164 case 9600:
165 default:
166 control[0] = ACT200L_REG8 | (ACT200L_9600 & 0x0f);
167 control[1] = ACT200L_REG9 | ((ACT200L_9600 >> 4) & 0x0f);
168 break;
169 case 19200:
170 control[0] = ACT200L_REG8 | (ACT200L_19200 & 0x0f);
171 control[1] = ACT200L_REG9 | ((ACT200L_19200 >> 4) & 0x0f);
172 break;
173 case 38400:
174 control[0] = ACT200L_REG8 | (ACT200L_38400 & 0x0f);
175 control[1] = ACT200L_REG9 | ((ACT200L_38400 >> 4) & 0x0f);
176 break;
177 case 57600:
178 control[0] = ACT200L_REG8 | (ACT200L_57600 & 0x0f);
179 control[1] = ACT200L_REG9 | ((ACT200L_57600 >> 4) & 0x0f);
180 break;
181 case 115200:
182 control[0] = ACT200L_REG8 | (ACT200L_115200 & 0x0f);
183 control[1] = ACT200L_REG9 | ((ACT200L_115200 >> 4) & 0x0f);
184 break;
185 }
186 control[2] = ACT200L_REG1 | ACT200L_LODB | ACT200L_WIDE;
187
188 /* Write control bytes */
189 self->write(self->dev, control, 3);
190 irda_task_next_state(task, IRDA_TASK_WAIT);
191 ret = msecs_to_jiffies(5);
192 break;
193 case IRDA_TASK_WAIT:
194 /* Go back to normal mode */
195 self->set_dtr_rts(self->dev, TRUE, TRUE);
196
197 irda_task_next_state(task, IRDA_TASK_DONE);
198 self->speed_task = NULL;
199 break;
200 default:
201 IRDA_ERROR("%s(), unknown state %d\n",
202 __FUNCTION__, task->state);
203 irda_task_next_state(task, IRDA_TASK_DONE);
204 self->speed_task = NULL;
205 ret = -1;
206 break;
207 }
208 return ret;
209}
210
211/*
212 * Function act200l_reset (driver)
213 *
214 * Reset the ACTiSYS ACT-IR200L type dongle.
215 */
216static int act200l_reset(struct irda_task *task)
217{
218 dongle_t *self = (dongle_t *) task->instance;
219 __u8 control[9] = {
220 ACT200L_REG15,
221 ACT200L_REG13 | ACT200L_SHDW,
222 ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
223 ACT200L_REG13,
224 ACT200L_REG7 | ACT200L_ENPOS,
225 ACT200L_REG6 | ACT200L_RS0 | ACT200L_RS1,
226 ACT200L_REG5 | ACT200L_RWIDL,
227 ACT200L_REG4 | ACT200L_OP0 | ACT200L_OP1 | ACT200L_BLKR,
228 ACT200L_REG0 | ACT200L_TXEN | ACT200L_RXEN
229 };
230 int ret = 0;
231
232 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
233
234 self->reset_task = task;
235
236 switch (task->state) {
237 case IRDA_TASK_INIT:
238 /* Power on the dongle */
239 self->set_dtr_rts(self->dev, TRUE, TRUE);
240
241 irda_task_next_state(task, IRDA_TASK_WAIT1);
242 ret = msecs_to_jiffies(50);
243 break;
244 case IRDA_TASK_WAIT1:
245 /* Reset the dongle : set RTS low for 25 ms */
246 self->set_dtr_rts(self->dev, TRUE, FALSE);
247
248 irda_task_next_state(task, IRDA_TASK_WAIT2);
249 ret = msecs_to_jiffies(50);
250 break;
251 case IRDA_TASK_WAIT2:
252 /* Clear DTR and set RTS to enter command mode */
253 self->set_dtr_rts(self->dev, FALSE, TRUE);
254
255 /* Write control bytes */
256 self->write(self->dev, control, 9);
257 irda_task_next_state(task, IRDA_TASK_WAIT3);
258 ret = msecs_to_jiffies(15);
259 break;
260 case IRDA_TASK_WAIT3:
261 /* Go back to normal mode */
262 self->set_dtr_rts(self->dev, TRUE, TRUE);
263
264 irda_task_next_state(task, IRDA_TASK_DONE);
265 self->reset_task = NULL;
266 break;
267 default:
268 IRDA_ERROR("%s(), unknown state %d\n",
269 __FUNCTION__, task->state);
270 irda_task_next_state(task, IRDA_TASK_DONE);
271 self->reset_task = NULL;
272 ret = -1;
273 break;
274 }
275 return ret;
276}
277
278MODULE_AUTHOR("SHIMIZU Takuya <tshimizu@ga2.so-net.ne.jp>");
279MODULE_DESCRIPTION("ACTiSYS ACT-IR200L dongle driver");
280MODULE_LICENSE("GPL");
281MODULE_ALIAS("irda-dongle-10"); /* IRDA_ACT200L_DONGLE */
282
283/*
284 * Function init_module (void)
285 *
286 * Initialize ACTiSYS ACT-IR200L module
287 *
288 */
289module_init(act200l_init);
290
291/*
292 * Function cleanup_module (void)
293 *
294 * Cleanup ACTiSYS ACT-IR200L module
295 *
296 */
297module_exit(act200l_cleanup);
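
The act200l.c callbacks above follow the irda_task return convention: a handler returns 0 when it has finished, a positive number of jiffies (produced here with msecs_to_jiffies()) when it wants to be re-invoked in its next state after that delay, and a negative value on error. The standalone sketch below illustrates only that return-value protocol; the toy step() handler, its state names and the millisecond units are stand-ins of our own, not the kernel's irda_task API.

/*
 * Standalone illustration of the "return 0 / positive delay / negative
 * error" convention used by act200l_reset() and act200l_change_speed().
 * Nothing here is kernel code; the caller loop plays the role of the
 * framework that re-invokes the handler after the requested delay.
 */
#include <stdio.h>

enum state { TASK_INIT, TASK_WAIT1, TASK_WAIT2, TASK_DONE };

static int step(enum state *st)
{
	switch (*st) {
	case TASK_INIT:
		*st = TASK_WAIT1;
		return 50;	/* please call me again in ~50 ms */
	case TASK_WAIT1:
		*st = TASK_WAIT2;
		return 15;	/* another short wait before finishing */
	case TASK_WAIT2:
		*st = TASK_DONE;
		return 0;	/* done */
	default:
		return -1;	/* unknown state: error */
	}
}

int main(void)
{
	enum state st = TASK_INIT;
	int ret;

	while ((ret = step(&st)) > 0)
		printf("would sleep %d ms, next state %d\n", ret, st);

	printf("handler finished with %d\n", ret);
	return 0;
}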
diff --git a/drivers/net/irda/actisys-sir.c b/drivers/net/irda/actisys-sir.c
new file mode 100644
index 000000000000..9715ab5572e9
--- /dev/null
+++ b/drivers/net/irda/actisys-sir.c
@@ -0,0 +1,246 @@
1/*********************************************************************
2 *
3 * Filename: actisys.c
4 * Version: 1.1
5 * Description: Implementation for the ACTiSYS IR-220L and IR-220L+
6 * dongles
7 * Status: Beta.
8 * Authors: Dag Brattli <dagb@cs.uit.no> (initially)
9 * Jean Tourrilhes <jt@hpl.hp.com> (new version)
10 * Martin Diehl <mad@mdiehl.de> (new version for sir_dev)
11 * Created at: Wed Oct 21 20:02:35 1998
12 * Modified at: Sun Oct 27 22:02:13 2002
13 * Modified by: Martin Diehl <mad@mdiehl.de>
14 *
15 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
16 * Copyright (c) 1999 Jean Tourrilhes
17 * Copyright (c) 2002 Martin Diehl
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License as
21 * published by the Free Software Foundation; either version 2 of
22 * the License, or (at your option) any later version.
23 *
24 * Neither Dag Brattli nor University of Tromsø admit liability nor
25 * provide warranty for any of this software. This material is
26 * provided "AS-IS" and at no charge.
27 *
28 ********************************************************************/
29
30/*
31 * Changelog
32 *
33 * 0.8 -> 0.9999 - Jean
34 * o New initialisation procedure : much safer and correct
35 * o New procedure the change speed : much faster and simpler
36 * o Other cleanups & comments
37 * Thanks to Lichen Wang @ Actisys for his excellent help...
38 *
39 * 1.0 -> 1.1 - Martin Diehl
40 * modified for new sir infrastructure
41 */
42
43#include <linux/module.h>
44#include <linux/delay.h>
45#include <linux/init.h>
46
47#include <net/irda/irda.h>
48
49#include "sir-dev.h"
50
51/*
52 * Define the timing of the pulses we send to the dongle (to reset it, and
53 * to toggle speeds). Basically, the limit here is the propagation speed of
54 * the signals through the serial port, the dongle being much faster. Any
55 * serial port supports 115 kb/s, so we are sure that pulses 8.5 us wide can
56 * go through cleanly. If you are on the wild side, you can try to lower
57 * this value (Actisys recommended 2 us to me, and 0 us works for me on a P233!)
58 */
59#define MIN_DELAY 10 /* 10 us to be on the conservative side */
60
61static int actisys_open(struct sir_dev *);
62static int actisys_close(struct sir_dev *);
63static int actisys_change_speed(struct sir_dev *, unsigned);
64static int actisys_reset(struct sir_dev *);
65
66/* These are the baudrates supported, in the order available */
67/* Note : the 220L doesn't support 38400, but we will fix that below */
68static unsigned baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
69
70#define MAX_SPEEDS (sizeof(baud_rates)/sizeof(baud_rates[0]))
71
72static struct dongle_driver act220l = {
73 .owner = THIS_MODULE,
74 .driver_name = "Actisys ACT-220L",
75 .type = IRDA_ACTISYS_DONGLE,
76 .open = actisys_open,
77 .close = actisys_close,
78 .reset = actisys_reset,
79 .set_speed = actisys_change_speed,
80};
81
82static struct dongle_driver act220l_plus = {
83 .owner = THIS_MODULE,
84 .driver_name = "Actisys ACT-220L+",
85 .type = IRDA_ACTISYS_PLUS_DONGLE,
86 .open = actisys_open,
87 .close = actisys_close,
88 .reset = actisys_reset,
89 .set_speed = actisys_change_speed,
90};
91
92static int __init actisys_sir_init(void)
93{
94 int ret;
95
96 /* First, register an Actisys 220L dongle */
97 ret = irda_register_dongle(&act220l);
98 if (ret < 0)
99 return ret;
100
101 /* Now, register an Actisys 220L+ dongle */
102 ret = irda_register_dongle(&act220l_plus);
103 if (ret < 0) {
104 irda_unregister_dongle(&act220l);
105 return ret;
106 }
107 return 0;
108}
109
110static void __exit actisys_sir_cleanup(void)
111{
112 /* We have to remove both dongles */
113 irda_unregister_dongle(&act220l_plus);
114 irda_unregister_dongle(&act220l);
115}
116
117static int actisys_open(struct sir_dev *dev)
118{
119 struct qos_info *qos = &dev->qos;
120
121 sirdev_set_dtr_rts(dev, TRUE, TRUE);
122
123 /* Set the speeds we can accept */
124 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
125
126 /* Remove support for 38400 if this is not a 220L+ dongle */
127 if (dev->dongle_drv->type == IRDA_ACTISYS_DONGLE)
128 qos->baud_rate.bits &= ~IR_38400;
129
130 qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
131 irda_qos_bits_to_value(qos);
132
133 /* irda thread waits 50 msec for power settling */
134
135 return 0;
136}
137
138static int actisys_close(struct sir_dev *dev)
139{
140 /* Power off the dongle */
141 sirdev_set_dtr_rts(dev, FALSE, FALSE);
142
143 return 0;
144}
145
146/*
147 * Function actisys_change_speed (task)
148 *
149 * Change speed of the ACTiSYS IR-220L and IR-220L+ type IrDA dongles.
150 * To cycle through the available baud rates, pulse RTS low for a few us.
151 *
152 * First, we reset the dongle to always start from a known state.
153 * Then, we cycle through the speeds by pulsing RTS low and then up.
154 * The dongle allows us to pulse quite fast, so we can set the speed in one go,
155 * which is much faster (< 100 us) and less complex than what is found
156 * in some other dongle drivers...
157 * Note that even if the new speed is the same as the current speed,
158 * we reassert the speed. This makes sure that things are all right,
159 * and it's fast anyway...
160 * By the way, this function will work for both types of dongles,
161 * because the additional speed is at the end of the sequence...
162 */
163static int actisys_change_speed(struct sir_dev *dev, unsigned speed)
164{
165 int ret = 0;
166 int i = 0;
167
168 IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __FUNCTION__,
169 speed, dev->speed);
170
171 /* dongle was already reset by the irda_request state machine,
172 * we are in known state (dongle default)
173 */
174
175 /*
176 * Now, we can set the speed requested. Send RTS pulses until we
177 * reach the target speed
178 */
179 for (i = 0; i < MAX_SPEEDS; i++) {
180 if (speed == baud_rates[i]) {
181 dev->speed = speed;
182 break;
183 }
184 /* Set RTS low for 10 us */
185 sirdev_set_dtr_rts(dev, TRUE, FALSE);
186 udelay(MIN_DELAY);
187
188 /* Set RTS high for 10 us */
189 sirdev_set_dtr_rts(dev, TRUE, TRUE);
190 udelay(MIN_DELAY);
191 }
192
193 /* Check if life is sweet... */
194 if (i >= MAX_SPEEDS) {
195 actisys_reset(dev);
196 ret = -EINVAL; /* This should not happen */
197 }
198
199 /* Basta lavoro, on se casse d'ici... */
200 return ret;
201}
202
203/*
204 * Function actisys_reset (task)
205 *
206 * Reset the Actisys type dongle. Warning, this function must only be
207 * called with a process context!
208 *
209 * We need to do two things in this function :
210 * o first make sure that the dongle is in a state where it can operate
211 * o second put the dongle in a known state
212 *
213 * The dongle is powered from the RTS and DTR lines. In the dongle, there
214 * is a big capacitor to accommodate the current spikes. This capacitor
215 * takes at least 50 ms to be charged. In theory, the BIOS sets those lines
216 * up, so by the time we arrive here we should be set. It doesn't hurt
217 * to be on the conservative side, so we will wait...
218 * <Martin : move above comment to irda_config_fsm>
219 * Then, we set the speed to 9600 b/s to get in a known state (see in
220 * change_speed for details). It is needed because the IrDA stack
221 * has tried to set the speed immediately after our first return,
222 * so before we can be sure the dongle is up and running.
223 */
224
225static int actisys_reset(struct sir_dev *dev)
226{
227 /* Reset the dongle : set DTR low for 10 us */
228 sirdev_set_dtr_rts(dev, FALSE, TRUE);
229 udelay(MIN_DELAY);
230
231 /* Go back to normal mode */
232 sirdev_set_dtr_rts(dev, TRUE, TRUE);
233
234 dev->speed = 9600; /* That's the default */
235
236 return 0;
237}
238
239MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> - Jean Tourrilhes <jt@hpl.hp.com>");
240MODULE_DESCRIPTION("ACTiSYS IR-220L and IR-220L+ dongle driver");
241MODULE_LICENSE("GPL");
242MODULE_ALIAS("irda-dongle-2"); /* IRDA_ACTISYS_DONGLE */
243MODULE_ALIAS("irda-dongle-3"); /* IRDA_ACTISYS_PLUS_DONGLE */
244
245module_init(actisys_sir_init);
246module_exit(actisys_sir_cleanup);
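
As the comment above actisys_change_speed() explains, the dongle comes out of reset at baud_rates[0] and advances one table entry for every RTS pulse, so the number of pulses needed is simply the index of the target speed in the table. Here is a standalone sketch of that lookup, reusing the driver's speed table; the pulses_for_speed() helper name and the test harness are ours, for illustration only.

/*
 * Userspace sketch (not kernel code): how many RTS pulses are needed to
 * reach a given speed on the ACTiSYS 220L/220L+, i.e. the index of the
 * speed in the same table actisys_change_speed() walks.
 */
#include <stdio.h>

static const unsigned int baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
#define MAX_SPEEDS (sizeof(baud_rates) / sizeof(baud_rates[0]))

/* Return the pulse count, or -1 if the speed is not in the table. */
static int pulses_for_speed(unsigned int speed)
{
	unsigned int i;

	for (i = 0; i < MAX_SPEEDS; i++)
		if (baud_rates[i] == speed)
			return (int)i;
	return -1;
}

int main(void)
{
	printf("57600  bps -> %d pulses\n", pulses_for_speed(57600));
	printf("38400  bps -> %d pulses\n", pulses_for_speed(38400));
	printf("123456 bps -> %d pulses\n", pulses_for_speed(123456));
	return 0;
}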
diff --git a/drivers/net/irda/actisys.c b/drivers/net/irda/actisys.c
new file mode 100644
index 000000000000..b2e31f4a384c
--- /dev/null
+++ b/drivers/net/irda/actisys.c
@@ -0,0 +1,288 @@
1/*********************************************************************
2 *
3 * Filename: actisys.c
4 * Version: 1.0
5 * Description: Implementation for the ACTiSYS IR-220L and IR-220L+
6 * dongles
7 * Status: Beta.
8 * Authors: Dag Brattli <dagb@cs.uit.no> (initially)
9 * Jean Tourrilhes <jt@hpl.hp.com> (new version)
10 * Created at: Wed Oct 21 20:02:35 1998
11 * Modified at: Fri Dec 17 09:10:43 1999
12 * Modified by: Dag Brattli <dagb@cs.uit.no>
13 *
14 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
15 * Copyright (c) 1999 Jean Tourrilhes
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
21 *
22 * Neither Dag Brattli nor University of Tromsø admit liability nor
23 * provide warranty for any of this software. This material is
24 * provided "AS-IS" and at no charge.
25 *
26 ********************************************************************/
27
28/*
29 * Changelog
30 *
31 * 0.8 -> 0.9999 - Jean
32 * o New initialisation procedure : much safer and correct
33 * o New procedure the change speed : much faster and simpler
34 * o Other cleanups & comments
35 * Thanks to Lichen Wang @ Actisys for his excellent help...
36 */
37
38#include <linux/module.h>
39#include <linux/delay.h>
40#include <linux/tty.h>
41#include <linux/init.h>
42
43#include <net/irda/irda.h>
44#include <net/irda/irda_device.h>
45
46/*
47 * Define the timing of the pulses we send to the dongle (to reset it, and
48 * to toggle speeds). Basically, the limit here is the propagation speed of
49 * the signals through the serial port, the dongle being much faster. Any
50 * serial port supports 115 kb/s, so we are sure that pulses 8.5 us wide can
51 * go through cleanly. If you are on the wild side, you can try to lower
52 * this value (Actisys recommended 2 us to me, and 0 us works for me on a P233!)
53 */
54#define MIN_DELAY 10 /* 10 us to be on the conservative side */
55
56static int actisys_change_speed(struct irda_task *task);
57static int actisys_reset(struct irda_task *task);
58static void actisys_open(dongle_t *self, struct qos_info *qos);
59static void actisys_close(dongle_t *self);
60
61/* These are the baudrates supported, in the order available */
62/* Note : the 220L doesn't support 38400, but we will fix that below */
63static __u32 baud_rates[] = { 9600, 19200, 57600, 115200, 38400 };
64#define MAX_SPEEDS 5
65
66static struct dongle_reg dongle = {
67 .type = IRDA_ACTISYS_DONGLE,
68 .open = actisys_open,
69 .close = actisys_close,
70 .reset = actisys_reset,
71 .change_speed = actisys_change_speed,
72 .owner = THIS_MODULE,
73};
74
75static struct dongle_reg dongle_plus = {
76 .type = IRDA_ACTISYS_PLUS_DONGLE,
77 .open = actisys_open,
78 .close = actisys_close,
79 .reset = actisys_reset,
80 .change_speed = actisys_change_speed,
81 .owner = THIS_MODULE,
82};
83
84/*
85 * Function actisys_init (void)
86 *
87 * There are two models of Actisys dongles we are dealing with,
88 * the 220L and 220L+. At this point, only irattach knows which
89 * kind the user has requested (it was an argument on irattach
90 * command line).
91 * So, we register a dongle of each sort and let irattach
92 * pick the right one...
93 */
94static int __init actisys_init(void)
95{
96 int ret;
97
98 /* First, register an Actisys 220L dongle */
99 ret = irda_device_register_dongle(&dongle);
100 if (ret < 0)
101 return ret;
102 /* Now, register an Actisys 220L+ dongle */
103 ret = irda_device_register_dongle(&dongle_plus);
104 if (ret < 0) {
105 irda_device_unregister_dongle(&dongle);
106 return ret;
107 }
108 return 0;
109}
110
111static void __exit actisys_cleanup(void)
112{
113 /* We have to remove both dongles */
114 irda_device_unregister_dongle(&dongle);
115 irda_device_unregister_dongle(&dongle_plus);
116}
117
118static void actisys_open(dongle_t *self, struct qos_info *qos)
119{
120 /* Power on the dongle */
121 self->set_dtr_rts(self->dev, TRUE, TRUE);
122
123 /* Set the speeds we can accept */
124 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
125
126 /* Remove support for 38400 if this is not a 220L+ dongle */
127 if (self->issue->type == IRDA_ACTISYS_DONGLE)
128 qos->baud_rate.bits &= ~IR_38400;
129
130 qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
131}
132
133static void actisys_close(dongle_t *self)
134{
135 /* Power off the dongle */
136 self->set_dtr_rts(self->dev, FALSE, FALSE);
137}
138
139/*
140 * Function actisys_change_speed (task)
141 *
142 * Change speed of the ACTiSYS IR-220L and IR-220L+ type IrDA dongles.
143 * To cycle through the available baud rates, pulse RTS low for a few us.
144 *
145 * First, we reset the dongle to always start from a known state.
146 * Then, we cycle through the speeds by pulsing RTS low and then up.
147 * The dongle allows us to pulse quite fast, so we can set the speed in one go,
148 * which is much faster (< 100 us) and less complex than what is found
149 * in some other dongle drivers...
150 * Note that even if the new speed is the same as the current speed,
151 * we reassert the speed. This makes sure that things are all right,
152 * and it's fast anyway...
153 * By the way, this function will work for both types of dongles,
154 * because the additional speed is at the end of the sequence...
155 */
156static int actisys_change_speed(struct irda_task *task)
157{
158 dongle_t *self = (dongle_t *) task->instance;
159 __u32 speed = (__u32) task->param; /* Target speed */
160 int ret = 0;
161 int i = 0;
162
163 IRDA_DEBUG(4, "%s(), speed=%d (was %d)\n", __FUNCTION__, speed,
164 self->speed);
165
166 /* Go to a known state by resetting the dongle */
167
168 /* Reset the dongle : set DTR low for 10 us */
169 self->set_dtr_rts(self->dev, FALSE, TRUE);
170 udelay(MIN_DELAY);
171
172 /* Go back to normal mode (we are now at 9600 b/s) */
173 self->set_dtr_rts(self->dev, TRUE, TRUE);
174
175 /*
176 * Now, we can set the speed requested. Send RTS pulses until we
177 * reach the target speed
178 */
179 for (i=0; i<MAX_SPEEDS; i++) {
180 if (speed == baud_rates[i]) {
181 self->speed = baud_rates[i];
182 break;
183 }
184 /* Make sure previous pulse is finished */
185 udelay(MIN_DELAY);
186
187 /* Set RTS low for 10 us */
188 self->set_dtr_rts(self->dev, TRUE, FALSE);
189 udelay(MIN_DELAY);
190
191 /* Set RTS high for 10 us */
192 self->set_dtr_rts(self->dev, TRUE, TRUE);
193 }
194
195 /* Check if life is sweet... */
196 if (i >= MAX_SPEEDS)
197 ret = -1; /* This should not happen */
198
199 /* Basta lavoro, on se casse d'ici... */
200 irda_task_next_state(task, IRDA_TASK_DONE);
201
202 return ret;
203}
204
205/*
206 * Function actisys_reset (task)
207 *
208 * Reset the Actisys type dongle. Warning, this function must only be
209 * called with a process context!
210 *
211 * We need to do two things in this function :
212 * o first make sure that the dongle is in a state where it can operate
213 * o second put the dongle in a known state
214 *
215 * The dongle is powered from the RTS and DTR lines. In the dongle, there
216 * is a big capacitor to accommodate the current spikes. This capacitor
217 * takes at least 50 ms to be charged. In theory, the BIOS sets those lines
218 * up, so by the time we arrive here we should be set. It doesn't hurt
219 * to be on the conservative side, so we will wait...
220 * Then, we set the speed to 9600 b/s to get in a known state (see in
221 * change_speed for details). It is needed because the IrDA stack
222 * has tried to set the speed immediately after our first return,
223 * so before we can be sure the dongle is up and running.
224 */
225static int actisys_reset(struct irda_task *task)
226{
227 dongle_t *self = (dongle_t *) task->instance;
228 int ret = 0;
229
230 IRDA_ASSERT(task != NULL, return -1;);
231
232 self->reset_task = task;
233
234 switch (task->state) {
235 case IRDA_TASK_INIT:
236 /* Set both DTR & RTS to power up the dongle */
237 /* In theory redundant with power up in actisys_open() */
238 self->set_dtr_rts(self->dev, TRUE, TRUE);
239
240 /* Sleep 50 ms to make sure capacitor is charged */
241 ret = msecs_to_jiffies(50);
242 irda_task_next_state(task, IRDA_TASK_WAIT);
243 break;
244 case IRDA_TASK_WAIT:
245 /* Reset the dongle : set DTR low for 10 us */
246 self->set_dtr_rts(self->dev, FALSE, TRUE);
247 udelay(MIN_DELAY);
248
249 /* Go back to normal mode */
250 self->set_dtr_rts(self->dev, TRUE, TRUE);
251
252 irda_task_next_state(task, IRDA_TASK_DONE);
253 self->reset_task = NULL;
254 self->speed = 9600; /* That's the default */
255 break;
256 default:
257 IRDA_ERROR("%s(), unknown state %d\n",
258 __FUNCTION__, task->state);
259 irda_task_next_state(task, IRDA_TASK_DONE);
260 self->reset_task = NULL;
261 ret = -1;
262 break;
263 }
264 return ret;
265}
266
267MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no> - Jean Tourrilhes <jt@hpl.hp.com>");
268MODULE_DESCRIPTION("ACTiSYS IR-220L and IR-220L+ dongle driver");
269MODULE_LICENSE("GPL");
270MODULE_ALIAS("irda-dongle-2"); /* IRDA_ACTISYS_DONGLE */
271MODULE_ALIAS("irda-dongle-3"); /* IRDA_ACTISYS_PLUS_DONGLE */
272
273
274/*
275 * Function init_module (void)
276 *
277 * Initialize Actisys module
278 *
279 */
280module_init(actisys_init);
281
282/*
283 * Function cleanup_module (void)
284 *
285 * Cleanup Actisys module
286 *
287 */
288module_exit(actisys_cleanup);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
new file mode 100644
index 000000000000..9bf34681d3df
--- /dev/null
+++ b/drivers/net/irda/ali-ircc.c
@@ -0,0 +1,2277 @@
1/*********************************************************************
2 *
3 * Filename: ali-ircc.c
4 * Version: 0.5
5 * Description: Driver for the ALI M1535D and M1543C FIR Controller
6 * Status: Experimental.
7 * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
8 * Created at: 2000/10/16 03:46PM
9 * Modified at: 2001/1/3 02:55PM
10 * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
11 * Modified at: 2003/11/6 and support for ALi south-bridge chipsets M1563
12 * Modified by: Clear Zhang <clear_zhang@ali.com.tw>
13 *
14 * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
15 * All Rights Reserved
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
21 *
22 ********************************************************************/
23
24#include <linux/module.h>
25
26#include <linux/kernel.h>
27#include <linux/types.h>
28#include <linux/skbuff.h>
29#include <linux/netdevice.h>
30#include <linux/ioport.h>
31#include <linux/delay.h>
32#include <linux/slab.h>
33#include <linux/init.h>
34#include <linux/rtnetlink.h>
35#include <linux/serial_reg.h>
36#include <linux/dma-mapping.h>
37
38#include <asm/io.h>
39#include <asm/dma.h>
40#include <asm/byteorder.h>
41
42#include <linux/pm.h>
43
44#include <net/irda/wrapper.h>
45#include <net/irda/irda.h>
46#include <net/irda/irda_device.h>
47
48#include "ali-ircc.h"
49
50#define CHIP_IO_EXTENT 8
51#define BROKEN_DONGLE_ID
52
53static char *driver_name = "ali-ircc";
54
55/* Module parameters */
56static int qos_mtt_bits = 0x07; /* 1 ms or more */
57
58/* Use BIOS settings by default, but the user may supply module parameters */
59static unsigned int io[] = { ~0, ~0, ~0, ~0 };
60static unsigned int irq[] = { 0, 0, 0, 0 };
61static unsigned int dma[] = { 0, 0, 0, 0 };
62
63static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
64static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
65static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);
66
67/* These are the currently known ALi south-bridge chipsets; the only difference
68 * is that the M1543C doesn't support the HP HSDL-3600
69 */
70static ali_chip_t chips[] =
71{
72 { "M1543", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x43, ali_ircc_probe_53, ali_ircc_init_43 },
73 { "M1535", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x53, ali_ircc_probe_53, ali_ircc_init_53 },
74 { "M1563", { 0x3f0, 0x370 }, 0x51, 0x23, 0x20, 0x63, ali_ircc_probe_53, ali_ircc_init_53 },
75 { NULL }
76};
77
78/* Max 4 instances for now */
79static struct ali_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
80
81/* Dongle Types */
82static char *dongle_types[] = {
83 "TFDS6000",
84 "HP HSDL-3600",
85 "HP HSDL-1100",
86 "No dongle connected",
87};
88
89/* Some prototypes */
90static int ali_ircc_open(int i, chipio_t *info);
91
92static int ali_ircc_close(struct ali_ircc_cb *self);
93
94static int ali_ircc_setup(chipio_t *info);
95static int ali_ircc_is_receiving(struct ali_ircc_cb *self);
96static int ali_ircc_net_open(struct net_device *dev);
97static int ali_ircc_net_close(struct net_device *dev);
98static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
99static int ali_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
100static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
101static void ali_ircc_suspend(struct ali_ircc_cb *self);
102static void ali_ircc_wakeup(struct ali_ircc_cb *self);
103static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev);
104
105/* SIR function */
106static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
107static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self);
108static void ali_ircc_sir_receive(struct ali_ircc_cb *self);
109static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self);
110static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
111static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
112
113/* FIR function */
114static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
115static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 speed);
116static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self);
117static int ali_ircc_dma_receive(struct ali_ircc_cb *self);
118static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self);
119static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self);
120static void ali_ircc_dma_xmit(struct ali_ircc_cb *self);
121
122/* My Function */
123static int ali_ircc_read_dongle_id (int i, chipio_t *info);
124static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed);
125
126/* ALi chip function */
127static void SIR2FIR(int iobase);
128static void FIR2SIR(int iobase);
129static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable);
130
131/*
132 * Function ali_ircc_init ()
133 *
134 * Initialize chip. Find out what kind of chips we are dealing with
135 * and the addresses of their configuration registers
136 */
137static int __init ali_ircc_init(void)
138{
139 ali_chip_t *chip;
140 chipio_t info;
141 int ret = -ENODEV;
142 int cfg, cfg_base;
143 int reg, revision;
144 int i = 0;
145
146 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
147
148 /* Probe for all the ALi chipsets we know about */
149 for (chip= chips; chip->name; chip++, i++)
150 {
151 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, chip->name);
152
153 /* Try all config registers for this chip */
154 for (cfg=0; cfg<2; cfg++)
155 {
156 cfg_base = chip->cfg[cfg];
157 if (!cfg_base)
158 continue;
159
160 memset(&info, 0, sizeof(chipio_t));
161 info.cfg_base = cfg_base;
162 info.fir_base = io[i];
163 info.dma = dma[i];
164 info.irq = irq[i];
165
166
167 /* Enter Configuration */
168 outb(chip->entr1, cfg_base);
169 outb(chip->entr2, cfg_base);
170
171 /* Select Logical Device 5 Registers (UART2) */
172 outb(0x07, cfg_base);
173 outb(0x05, cfg_base+1);
174
175 /* Read Chip Identification Register */
176 outb(chip->cid_index, cfg_base);
177 reg = inb(cfg_base+1);
178
179 if (reg == chip->cid_value)
180 {
181 IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __FUNCTION__, cfg_base);
182
183 outb(0x1F, cfg_base);
184 revision = inb(cfg_base+1);
185 IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __FUNCTION__,
186 chip->name, revision);
187
188 /*
189 * If the user supplies the base address, then
190 * we init the chip, if not we probe the values
191 * set by the BIOS
192 */
193 if (io[i] < 2000)
194 {
195 chip->init(chip, &info);
196 }
197 else
198 {
199 chip->probe(chip, &info);
200 }
201
202 if (ali_ircc_open(i, &info) == 0)
203 ret = 0;
204 i++;
205 }
206 else
207 {
208 IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __FUNCTION__, chip->name, cfg_base);
209 }
210 /* Exit configuration */
211 outb(0xbb, cfg_base);
212 }
213 }
214
215 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
216 return ret;
217}
218
219/*
220 * Function ali_ircc_cleanup ()
221 *
222 * Close all configured chips
223 *
224 */
225static void __exit ali_ircc_cleanup(void)
226{
227 int i;
228
229 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
230
231 pm_unregister_all(ali_ircc_pmproc);
232
233 for (i=0; i < 4; i++) {
234 if (dev_self[i])
235 ali_ircc_close(dev_self[i]);
236 }
237
238 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
239}
240
241/*
242 * Function ali_ircc_open (int i, chipio_t *inf)
243 *
244 * Open driver instance
245 *
246 */
247static int ali_ircc_open(int i, chipio_t *info)
248{
249 struct net_device *dev;
250 struct ali_ircc_cb *self;
251 struct pm_dev *pmdev;
252 int dongle_id;
253 int err;
254
255 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
256
257 /* Set FIR FIFO and DMA Threshold */
258 if ((ali_ircc_setup(info)) == -1)
259 return -1;
260
261 dev = alloc_irdadev(sizeof(*self));
262 if (dev == NULL) {
263 IRDA_ERROR("%s(), can't allocate memory for control block!\n",
264 __FUNCTION__);
265 return -ENOMEM;
266 }
267
268 self = dev->priv;
269 self->netdev = dev;
270 spin_lock_init(&self->lock);
271
272 /* Need to store self somewhere */
273 dev_self[i] = self;
274 self->index = i;
275
276 /* Initialize IO */
277 self->io.cfg_base = info->cfg_base; /* In ali_ircc_probe_53 assign */
278 self->io.fir_base = info->fir_base; /* info->sir_base = info->fir_base */
279 self->io.sir_base = info->sir_base; /* ALi SIR and FIR use the same address */
280 self->io.irq = info->irq;
281 self->io.fir_ext = CHIP_IO_EXTENT;
282 self->io.dma = info->dma;
283 self->io.fifo_size = 16; /* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
284
285 /* Reserve the ioports that we need */
286 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
287 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n", __FUNCTION__,
288 self->io.fir_base);
289 err = -ENODEV;
290 goto err_out1;
291 }
292
293 /* Initialize QoS for this device */
294 irda_init_max_qos_capabilies(&self->qos);
295
296	/* The only value we must override is the baudrate */
297 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
298 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
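	/* Note: IR_4000000 is defined for the second byte of the 16-bit
	 * baud-rate capability field, hence the << 8 to place it there. */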
299
300 self->qos.min_turn_time.bits = qos_mtt_bits;
301
302 irda_qos_bits_to_value(&self->qos);
303
304 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
305 self->rx_buff.truesize = 14384;
306 self->tx_buff.truesize = 14384;
307
308 /* Allocate memory if needed */
309 self->rx_buff.head =
310 dma_alloc_coherent(NULL, self->rx_buff.truesize,
311 &self->rx_buff_dma, GFP_KERNEL);
312 if (self->rx_buff.head == NULL) {
313 err = -ENOMEM;
314 goto err_out2;
315 }
316 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
317
318 self->tx_buff.head =
319 dma_alloc_coherent(NULL, self->tx_buff.truesize,
320 &self->tx_buff_dma, GFP_KERNEL);
321 if (self->tx_buff.head == NULL) {
322 err = -ENOMEM;
323 goto err_out3;
324 }
325 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
326
327 self->rx_buff.in_frame = FALSE;
328 self->rx_buff.state = OUTSIDE_FRAME;
329 self->tx_buff.data = self->tx_buff.head;
330 self->rx_buff.data = self->rx_buff.head;
331
332 /* Reset Tx queue info */
333 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
334 self->tx_fifo.tail = self->tx_buff.head;
335
336
337 /* Keep track of module usage */
338 SET_MODULE_OWNER(dev);
339
340 /* Override the network functions we need to use */
341 dev->hard_start_xmit = ali_ircc_sir_hard_xmit;
342 dev->open = ali_ircc_net_open;
343 dev->stop = ali_ircc_net_close;
344 dev->do_ioctl = ali_ircc_net_ioctl;
345 dev->get_stats = ali_ircc_net_get_stats;
346
347 err = register_netdev(dev);
348 if (err) {
349 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
350 goto err_out4;
351 }
352 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
353
354 /* Check dongle id */
355 dongle_id = ali_ircc_read_dongle_id(i, info);
356 IRDA_MESSAGE("%s(), %s, Found dongle: %s\n", __FUNCTION__, driver_name, dongle_types[dongle_id]);
357
358 self->io.dongle_id = dongle_id;
359
360 pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, ali_ircc_pmproc);
361 if (pmdev)
362 pmdev->data = self;
363
364 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
365
366 return 0;
367
368 err_out4:
369 dma_free_coherent(NULL, self->tx_buff.truesize,
370 self->tx_buff.head, self->tx_buff_dma);
371 err_out3:
372 dma_free_coherent(NULL, self->rx_buff.truesize,
373 self->rx_buff.head, self->rx_buff_dma);
374 err_out2:
375 release_region(self->io.fir_base, self->io.fir_ext);
376 err_out1:
377 dev_self[i] = NULL;
378 free_netdev(dev);
379 return err;
380}
381
382
383/*
384 * Function ali_ircc_close (self)
385 *
386 * Close driver instance
387 *
388 */
389static int __exit ali_ircc_close(struct ali_ircc_cb *self)
390{
391 int iobase;
392
393 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
394
395 IRDA_ASSERT(self != NULL, return -1;);
396
397 iobase = self->io.fir_base;
398
399 /* Remove netdevice */
400 unregister_netdev(self->netdev);
401
402 /* Release the PORT that this driver is using */
403 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __FUNCTION__, self->io.fir_base);
404 release_region(self->io.fir_base, self->io.fir_ext);
405
406 if (self->tx_buff.head)
407 dma_free_coherent(NULL, self->tx_buff.truesize,
408 self->tx_buff.head, self->tx_buff_dma);
409
410 if (self->rx_buff.head)
411 dma_free_coherent(NULL, self->rx_buff.truesize,
412 self->rx_buff.head, self->rx_buff_dma);
413
414 dev_self[self->index] = NULL;
415 free_netdev(self->netdev);
416
417 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
418
419 return 0;
420}
421
422/*
423 * Function ali_ircc_init_43 (chip, info)
424 *
425 * Initialize the ALi M1543 chip.
426 */
427static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info)
428{
429 /* All controller information like I/O address, DMA channel, IRQ
430 * are set by BIOS
431 */
432
433 return 0;
434}
435
436/*
437 * Function ali_ircc_init_53 (chip, info)
438 *
439 * Initialize the ALi M1535 chip.
440 */
441static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info)
442{
443 /* All controller information like I/O address, DMA channel, IRQ
444 * are set by BIOS
445 */
446
447 return 0;
448}
449
450/*
451 * Function ali_ircc_probe_53 (chip, info)
452 *
453 * Probes for the ALi M1535D or M1535
454 */
455static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
456{
457 int cfg_base = info->cfg_base;
458 int hi, low, reg;
459
460 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
461
462 /* Enter Configuration */
463 outb(chip->entr1, cfg_base);
464 outb(chip->entr2, cfg_base);
465
466 /* Select Logical Device 5 Registers (UART2) */
467 outb(0x07, cfg_base);
468 outb(0x05, cfg_base+1);
469
470 /* Read address control register */
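	/* Super-I/O index registers 0x60/0x61 of logical device 5 hold the
	 * high and low bytes of the UART2 I/O base address. */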
471 outb(0x60, cfg_base);
472 hi = inb(cfg_base+1);
473 outb(0x61, cfg_base);
474 low = inb(cfg_base+1);
475 info->fir_base = (hi<<8) + low;
476
477 info->sir_base = info->fir_base;
478
479 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__, info->fir_base);
480
481 /* Read IRQ control register */
482 outb(0x70, cfg_base);
483 reg = inb(cfg_base+1);
484 info->irq = reg & 0x0f;
485 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq);
486
487 /* Read DMA channel */
488 outb(0x74, cfg_base);
489 reg = inb(cfg_base+1);
490 info->dma = reg & 0x07;
491
492 if(info->dma == 0x04)
493 IRDA_WARNING("%s(), No DMA channel assigned !\n", __FUNCTION__);
494 else
495 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma);
496
497 /* Read Enabled Status */
498 outb(0x30, cfg_base);
499 reg = inb(cfg_base+1);
500 info->enabled = (reg & 0x80) && (reg & 0x01);
501 IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __FUNCTION__, info->enabled);
502
503 /* Read Power Status */
504 outb(0x22, cfg_base);
505 reg = inb(cfg_base+1);
506 info->suspended = (reg & 0x20);
507 IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __FUNCTION__, info->suspended);
508
509 /* Exit configuration */
510 outb(0xbb, cfg_base);
511
512 IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
513
514 return 0;
515}
516
517/*
518 * Function ali_ircc_setup (info)
519 *
520 * Set FIR FIFO and DMA Threshold
521 * Returns non-negative on success.
522 *
523 */
524static int ali_ircc_setup(chipio_t *info)
525{
526 unsigned char tmp;
527 int version;
528 int iobase = info->fir_base;
529
530 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
531
532 /* Locking comments :
533 * Most operations here need to be protected. We are called before
534 * the device instance is created in ali_ircc_open(), therefore
535 * nobody can bother us - Jean II */
536
537 /* Switch to FIR space */
538 SIR2FIR(iobase);
539
540 /* Master Reset */
541 outb(0x40, iobase+FIR_MCR); // benjamin 2000/11/30 11:45AM
542
543 /* Read FIR ID Version Register */
544 switch_bank(iobase, BANK3);
545 version = inb(iobase+FIR_ID_VR);
546
547 /* Should be 0x00 in the M1535/M1535D */
548 if(version != 0x00)
549 {
550 IRDA_ERROR("%s, Wrong chip version %02x\n", driver_name, version);
551 return -1;
552 }
553
554 // IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, info->cfg_base);
555
556 /* Set FIR FIFO Threshold Register */
557 switch_bank(iobase, BANK1);
558 outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
559
560 /* Set FIR DMA Threshold Register */
561 outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
562
563 /* CRC enable */
564 switch_bank(iobase, BANK2);
565 outb(inb(iobase+FIR_IRDA_CR) | IRDA_CR_CRC, iobase+FIR_IRDA_CR);
566
567	/* The NDIS driver sets the TX length here (BANK2, Alias 3 and Alias 4) */
568
569 /* Switch to Bank 0 */
570 switch_bank(iobase, BANK0);
571
572 tmp = inb(iobase+FIR_LCR_B);
573 tmp &=~0x20; // disable SIP
574 tmp |= 0x80; // these two steps make RX mode
575 tmp &= 0xbf;
576 outb(tmp, iobase+FIR_LCR_B);
577
578 /* Disable Interrupt */
579 outb(0x00, iobase+FIR_IER);
580
581
582 /* Switch to SIR space */
583 FIR2SIR(iobase);
584
585 IRDA_MESSAGE("%s, driver loaded (Benjamin Kong)\n", driver_name);
586
587 /* Enable receive interrupts */
588 // outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
589 // Turn on the interrupts in ali_ircc_net_open
590
591 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
592
593 return 0;
594}
595
596/*
597 * Function ali_ircc_read_dongle_id (int index, info)
598 *
599 * Try to read the dongle identification. This procedure needs to be executed
600 * once after power-on/reset. It also needs to be used whenever you suspect
601 * that the user may have plugged/unplugged the IrDA Dongle.
602 */
603static int ali_ircc_read_dongle_id (int i, chipio_t *info)
604{
605 int dongle_id, reg;
606 int cfg_base = info->cfg_base;
607
608 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
609
610 /* Enter Configuration */
611 outb(chips[i].entr1, cfg_base);
612 outb(chips[i].entr2, cfg_base);
613
614 /* Select Logical Device 5 Registers (UART2) */
615 outb(0x07, cfg_base);
616 outb(0x05, cfg_base+1);
617
618 /* Read Dongle ID */
619 outb(0xf0, cfg_base);
620 reg = inb(cfg_base+1);
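	/* Bits 7 and 5 of configuration register 0xF0 form the 2-bit dongle ID
	 * used to index dongle_types[] */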
621 dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
622 IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __FUNCTION__,
623 dongle_id, dongle_types[dongle_id]);
624
625 /* Exit configuration */
626 outb(0xbb, cfg_base);
627
628 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
629
630 return dongle_id;
631}
632
633/*
634 * Function ali_ircc_interrupt (irq, dev_id, regs)
635 *
636 * An interrupt from the chip has arrived. Time to do some work
637 *
638 */
639static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id,
640 struct pt_regs *regs)
641{
642 struct net_device *dev = (struct net_device *) dev_id;
643 struct ali_ircc_cb *self;
644 int ret;
645
646 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
647
648 if (!dev) {
649 IRDA_WARNING("%s: irq %d for unknown device.\n", driver_name, irq);
650 return IRQ_NONE;
651 }
652
653 self = (struct ali_ircc_cb *) dev->priv;
654
655 spin_lock(&self->lock);
656
657 /* Dispatch interrupt handler for the current speed */
658 if (self->io.speed > 115200)
659 ret = ali_ircc_fir_interrupt(self);
660 else
661 ret = ali_ircc_sir_interrupt(self);
662
663 spin_unlock(&self->lock);
664
665 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
666 return ret;
667}
668/*
669 * Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self)
670 *
671 * Handle MIR/FIR interrupt
672 *
673 */
674static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
675{
676 __u8 eir, OldMessageCount;
677 int iobase, tmp;
678
679 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
680
681 iobase = self->io.fir_base;
682
683 switch_bank(iobase, BANK0);
684 self->InterruptID = inb(iobase+FIR_IIR);
685 self->BusStatus = inb(iobase+FIR_BSR);
686
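	/* The message (frame) count lives in the low three bits of FIR_LSR;
	 * remember the previous value so the Rx paths below can flag
	 * rcvFramesOverflow when the counter goes backwards. */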
687 OldMessageCount = (self->LineStatus + 1) & 0x07;
688 self->LineStatus = inb(iobase+FIR_LSR);
689 //self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
690 eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
691
692 IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __FUNCTION__,self->InterruptID);
693 IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __FUNCTION__,self->LineStatus);
694 IRDA_DEBUG(1, "%s(), self->ier = %x\n", __FUNCTION__,self->ier);
695 IRDA_DEBUG(1, "%s(), eir = %x\n", __FUNCTION__,eir);
696
697 /* Disable interrupts */
698 SetCOMInterrupts(self, FALSE);
699
700 /* Tx or Rx Interrupt */
701
702 if (eir & IIR_EOM)
703 {
704 if (self->io.direction == IO_XMIT) /* TX */
705 {
706 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __FUNCTION__);
707
708 if(ali_ircc_dma_xmit_complete(self))
709 {
710 if (irda_device_txqueue_empty(self->netdev))
711 {
712 /* Prepare for receive */
713 ali_ircc_dma_receive(self);
714 self->ier = IER_EOM;
715 }
716 }
717 else
718 {
719 self->ier = IER_EOM;
720 }
721
722 }
723 else /* RX */
724 {
725 IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __FUNCTION__);
726
727 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
728 {
729 self->rcvFramesOverflow = TRUE;
730 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __FUNCTION__);
731 }
732
733 if (ali_ircc_dma_receive_complete(self))
734 {
735 IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __FUNCTION__);
736
737 self->ier = IER_EOM;
738 }
739 else
740 {
741 IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __FUNCTION__);
742
743 self->ier = IER_EOM | IER_TIMER;
744 }
745
746 }
747 }
748 /* Timer Interrupt */
749 else if (eir & IIR_TIMER)
750 {
751 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
752 {
753 self->rcvFramesOverflow = TRUE;
754 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __FUNCTION__);
755 }
756 /* Disable Timer */
757 switch_bank(iobase, BANK1);
758 tmp = inb(iobase+FIR_CR);
759 outb( tmp& ~CR_TIMER_EN, iobase+FIR_CR);
760
761 /* Check if this is a Tx timer interrupt */
762 if (self->io.direction == IO_XMIT)
763 {
764 ali_ircc_dma_xmit(self);
765
766 /* Interrupt on EOM */
767 self->ier = IER_EOM;
768
769 }
770 else /* Rx */
771 {
772 if(ali_ircc_dma_receive_complete(self))
773 {
774 self->ier = IER_EOM;
775 }
776 else
777 {
778 self->ier = IER_EOM | IER_TIMER;
779 }
780 }
781 }
782
783 /* Restore Interrupt */
784 SetCOMInterrupts(self, TRUE);
785
786 IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __FUNCTION__);
787 return IRQ_RETVAL(eir);
788}
789
790/*
791 * Function ali_ircc_sir_interrupt (irq, self, eir)
792 *
793 * Handle SIR interrupt
794 *
795 */
796static irqreturn_t ali_ircc_sir_interrupt(struct ali_ircc_cb *self)
797{
798 int iobase;
799 int iir, lsr;
800
801 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
802
803 iobase = self->io.sir_base;
804
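	/* UART_IIR_ID masks out everything but the 16550 interrupt-identification bits */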
805 iir = inb(iobase+UART_IIR) & UART_IIR_ID;
806 if (iir) {
807 /* Clear interrupt */
808 lsr = inb(iobase+UART_LSR);
809
810 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __FUNCTION__,
811 iir, lsr, iobase);
812
813 switch (iir)
814 {
815 case UART_IIR_RLSI:
816 IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
817 break;
818 case UART_IIR_RDI:
819 /* Receive interrupt */
820 ali_ircc_sir_receive(self);
821 break;
822 case UART_IIR_THRI:
823 if (lsr & UART_LSR_THRE)
824 {
825 /* Transmitter ready for data */
826 ali_ircc_sir_write_wakeup(self);
827 }
828 break;
829 default:
830 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __FUNCTION__, iir);
831 break;
832 }
833
834 }
835
836
837 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
838
839 return IRQ_RETVAL(iir);
840}
841
842
843/*
844 * Function ali_ircc_sir_receive (self)
845 *
846 * Receive one frame from the infrared port
847 *
848 */
849static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
850{
851 int boguscount = 0;
852 int iobase;
853
854 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
855 IRDA_ASSERT(self != NULL, return;);
856
857 iobase = self->io.sir_base;
858
859 /*
860 * Receive all characters in Rx FIFO, unwrap and unstuff them.
861 * async_unwrap_char will deliver all found frames
862 */
863 do {
864 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
865 inb(iobase+UART_RX));
866
867 /* Make sure we don't stay here too long */
868 if (boguscount++ > 32) {
869 IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__);
870 break;
871 }
872 } while (inb(iobase+UART_LSR) & UART_LSR_DR);
873
874 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
875}
876
877/*
878 * Function ali_ircc_sir_write_wakeup (tty)
879 *
880 * Called by the driver when there's room for more data. If we have
881 * more packets to send, we send them here.
882 *
883 */
884static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
885{
886 int actual = 0;
887 int iobase;
888
889 IRDA_ASSERT(self != NULL, return;);
890
891 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
892
893 iobase = self->io.sir_base;
894
895 /* Finished with frame? */
896 if (self->tx_buff.len > 0)
897 {
898 /* Write data left in transmit buffer */
899 actual = ali_ircc_sir_write(iobase, self->io.fifo_size,
900 self->tx_buff.data, self->tx_buff.len);
901 self->tx_buff.data += actual;
902 self->tx_buff.len -= actual;
903 }
904 else
905 {
906 if (self->new_speed)
907 {
908 /* We must wait until all data are gone */
909 while(!(inb(iobase+UART_LSR) & UART_LSR_TEMT))
910				IRDA_DEBUG(1, "%s(), waiting for UART_LSR_TEMT\n", __FUNCTION__ );
911
912 IRDA_DEBUG(1, "%s(), Changing speed! self->new_speed = %d\n", __FUNCTION__ , self->new_speed);
913 ali_ircc_change_speed(self, self->new_speed);
914 self->new_speed = 0;
915
916 // benjamin 2000/11/10 06:32PM
917 if (self->io.speed > 115200)
918 {
919 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __FUNCTION__ );
920
921 self->ier = IER_EOM;
922 // SetCOMInterrupts(self, TRUE);
923 return;
924 }
925 }
926 else
927 {
928 netif_wake_queue(self->netdev);
929 }
930
931 self->stats.tx_packets++;
932
933 /* Turn on receive interrupts */
934 outb(UART_IER_RDI, iobase+UART_IER);
935 }
936
937 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
938}
939
940static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
941{
942 struct net_device *dev = self->netdev;
943 int iobase;
944
945 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
946
947 IRDA_DEBUG(2, "%s(), setting speed = %d \n", __FUNCTION__ , baud);
948
949	/* This function *must* be called with irqs off and the spinlock held.
950 * - Jean II */
951
952 iobase = self->io.fir_base;
953
954 SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM
955
956 /* Go to MIR, FIR Speed */
957 if (baud > 115200)
958 {
959
960
961 ali_ircc_fir_change_speed(self, baud);
962
963 /* Install FIR xmit handler*/
964 dev->hard_start_xmit = ali_ircc_fir_hard_xmit;
965
966		/* Enable Interrupt */
967 self->ier = IER_EOM; // benjamin 2000/11/20 07:24PM
968
969		/* Be ready for incoming frames */
970 ali_ircc_dma_receive(self); // benajmin 2000/11/8 07:46PM not complete
971 }
972 /* Go to SIR Speed */
973 else
974 {
975 ali_ircc_sir_change_speed(self, baud);
976
977 /* Install SIR xmit handler*/
978 dev->hard_start_xmit = ali_ircc_sir_hard_xmit;
979 }
980
981
982 SetCOMInterrupts(self, TRUE); // 2000/11/24 11:43AM
983
984 netif_wake_queue(self->netdev);
985
986 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
987}
988
989static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
990{
991
992 int iobase;
993 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
994 struct net_device *dev;
995
996 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
997
998 IRDA_ASSERT(self != NULL, return;);
999
1000 dev = self->netdev;
1001 iobase = self->io.fir_base;
1002
1003 IRDA_DEBUG(1, "%s(), self->io.speed = %d, change to speed = %d\n", __FUNCTION__ ,self->io.speed,baud);
1004
1005 /* Come from SIR speed */
1006 if(self->io.speed <=115200)
1007 {
1008 SIR2FIR(iobase);
1009 }
1010
1011 /* Update accounting for new speed */
1012 self->io.speed = baud;
1013
1014 // Set Dongle Speed mode
1015 ali_ircc_change_dongle_speed(self, baud);
1016
1017 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1018}
1019
1020/*
1021 * Function ali_sir_change_speed (self, speed)
1022 *
1023 * Set speed of IrDA port to specified baudrate
1024 *
1025 */
1026static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
1027{
1028 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
1029 unsigned long flags;
1030 int iobase;
1031 int fcr; /* FIFO control reg */
1032 int lcr; /* Line control reg */
1033 int divisor;
1034
1035 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
1036
1037 IRDA_DEBUG(1, "%s(), Setting speed to: %d\n", __FUNCTION__ , speed);
1038
1039 IRDA_ASSERT(self != NULL, return;);
1040
1041 iobase = self->io.sir_base;
1042
1043 /* Come from MIR or FIR speed */
1044 if(self->io.speed >115200)
1045 {
1046 // Set Dongle Speed mode first
1047 ali_ircc_change_dongle_speed(self, speed);
1048
1049 FIR2SIR(iobase);
1050 }
1051
1052	// Clear Line and Auxiliary status registers 2000/11/24 11:47AM
1053
1054 inb(iobase+UART_LSR);
1055 inb(iobase+UART_SCR);
1056
1057 /* Update accounting for new speed */
1058 self->io.speed = speed;
1059
1060 spin_lock_irqsave(&self->lock, flags);
1061
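	/* Standard 16550 divisor latch: the base rate is 115200 b/s, so e.g.
	 * 9600 b/s needs a divisor of 12 */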
1062 divisor = 115200/speed;
1063
1064 fcr = UART_FCR_ENABLE_FIFO;
1065
1066 /*
1067	 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
1068	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
1069 * about this timeout since it will always be fast enough.
1070 */
1071 if (self->io.speed < 38400)
1072 fcr |= UART_FCR_TRIGGER_1;
1073 else
1074 fcr |= UART_FCR_TRIGGER_14;
1075
1076 /* IrDA ports use 8N1 */
1077 lcr = UART_LCR_WLEN8;
1078
1079 outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
1080 outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
1081 outb(divisor >> 8, iobase+UART_DLM);
1082 outb(lcr, iobase+UART_LCR); /* Set 8N1 */
1083 outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
1084
1085	/* Without this, the connection breaks after coming back from FIR speed,
1086	   but with this, the SIR connection is harder to establish */
1087 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
1088
1089 spin_unlock_irqrestore(&self->lock, flags);
1090
1091 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1092}
1093
1094static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
1095{
1096
1097 struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
1098 int iobase,dongle_id;
1099 int tmp = 0;
1100
1101 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
1102
1103 iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
1104 dongle_id = self->io.dongle_id;
1105
1106 /* We are already locked, no need to do it again */
1107
1108 IRDA_DEBUG(1, "%s(), Set Speed for %s , Speed = %d\n", __FUNCTION__ , dongle_types[dongle_id], speed);
1109
1110 switch_bank(iobase, BANK2);
1111 tmp = inb(iobase+FIR_IRDA_CR);
1112
1113 /* IBM type dongle */
1114 if(dongle_id == 0)
1115 {
1116 if(speed == 4000000)
1117 {
1118 // __ __
1119 // SD/MODE __| |__ __
1120 // __ __
1121 // IRTX __ __| |__
1122 // T1 T2 T3 T4 T5
1123
1124 tmp &= ~IRDA_CR_HDLC; // HDLC=0
1125 tmp |= IRDA_CR_CRC; // CRC=1
1126
1127 switch_bank(iobase, BANK2);
1128 outb(tmp, iobase+FIR_IRDA_CR);
1129
1130 // T1 -> SD/MODE:0 IRTX:0
1131 tmp &= ~0x09;
1132 tmp |= 0x02;
1133 outb(tmp, iobase+FIR_IRDA_CR);
1134 udelay(2);
1135
1136 // T2 -> SD/MODE:1 IRTX:0
1137 tmp &= ~0x01;
1138 tmp |= 0x0a;
1139 outb(tmp, iobase+FIR_IRDA_CR);
1140 udelay(2);
1141
1142 // T3 -> SD/MODE:1 IRTX:1
1143 tmp |= 0x0b;
1144 outb(tmp, iobase+FIR_IRDA_CR);
1145 udelay(2);
1146
1147 // T4 -> SD/MODE:0 IRTX:1
1148 tmp &= ~0x08;
1149 tmp |= 0x03;
1150 outb(tmp, iobase+FIR_IRDA_CR);
1151 udelay(2);
1152
1153 // T5 -> SD/MODE:0 IRTX:0
1154 tmp &= ~0x09;
1155 tmp |= 0x02;
1156 outb(tmp, iobase+FIR_IRDA_CR);
1157 udelay(2);
1158
1159 // reset -> Normal TX output Signal
1160 outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
1161 }
1162 else /* speed <=1152000 */
1163 {
1164 // __
1165 // SD/MODE __| |__
1166 //
1167 // IRTX ________
1168 // T1 T2 T3
1169
1170			/* MIR 1152000, 576000 */
1171 if (speed==1152000)
1172 {
1173 tmp |= 0xA0; //HDLC=1, 1.152Mbps=1
1174 }
1175 else
1176 {
1177 tmp &=~0x80; //HDLC 0.576Mbps
1178 tmp |= 0x20; //HDLC=1,
1179 }
1180
1181 tmp |= IRDA_CR_CRC; // CRC=1
1182
1183 switch_bank(iobase, BANK2);
1184 outb(tmp, iobase+FIR_IRDA_CR);
1185
1186			/* MIR 1152000, 576000 */
1187
1188 //switch_bank(iobase, BANK2);
1189 // T1 -> SD/MODE:0 IRTX:0
1190 tmp &= ~0x09;
1191 tmp |= 0x02;
1192 outb(tmp, iobase+FIR_IRDA_CR);
1193 udelay(2);
1194
1195 // T2 -> SD/MODE:1 IRTX:0
1196 tmp &= ~0x01;
1197 tmp |= 0x0a;
1198 outb(tmp, iobase+FIR_IRDA_CR);
1199
1200 // T3 -> SD/MODE:0 IRTX:0
1201 tmp &= ~0x09;
1202 tmp |= 0x02;
1203 outb(tmp, iobase+FIR_IRDA_CR);
1204 udelay(2);
1205
1206 // reset -> Normal TX output Signal
1207 outb(tmp & ~0x02, iobase+FIR_IRDA_CR);
1208 }
1209 }
1210	else if (dongle_id == 1) /* HP HSDL-3600 */
1211 {
1212 switch(speed)
1213 {
1214 case 4000000:
1215 tmp &= ~IRDA_CR_HDLC; // HDLC=0
1216 break;
1217
1218 case 1152000:
1219 tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
1220 break;
1221
1222 case 576000:
1223 tmp &=~0x80; // HDLC 0.576Mbps
1224 tmp |= 0x20; // HDLC=1,
1225 break;
1226 }
1227
1228 tmp |= IRDA_CR_CRC; // CRC=1
1229
1230 switch_bank(iobase, BANK2);
1231 outb(tmp, iobase+FIR_IRDA_CR);
1232 }
1233	else /* HP HSDL-1100 */
1234 {
1235 if(speed <= 115200) /* SIR */
1236 {
1237
1238 tmp &= ~IRDA_CR_FIR_SIN; // HP sin select = 0
1239
1240 switch_bank(iobase, BANK2);
1241 outb(tmp, iobase+FIR_IRDA_CR);
1242 }
1243 else /* MIR FIR */
1244 {
1245
1246 switch(speed)
1247 {
1248 case 4000000:
1249 tmp &= ~IRDA_CR_HDLC; // HDLC=0
1250 break;
1251
1252 case 1152000:
1253 tmp |= 0xA0; // HDLC=1, 1.152Mbps=1
1254 break;
1255
1256 case 576000:
1257 tmp &=~0x80; // HDLC 0.576Mbps
1258 tmp |= 0x20; // HDLC=1,
1259 break;
1260 }
1261
1262 tmp |= IRDA_CR_CRC; // CRC=1
1263 tmp |= IRDA_CR_FIR_SIN; // HP sin select = 1
1264
1265 switch_bank(iobase, BANK2);
1266 outb(tmp, iobase+FIR_IRDA_CR);
1267 }
1268 }
1269
1270 switch_bank(iobase, BANK0);
1271
1272 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1273}
1274
1275/*
1276 * Function ali_ircc_sir_write (driver)
1277 *
1278 * Fill Tx FIFO with transmit data
1279 *
1280 */
1281static int ali_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1282{
1283 int actual = 0;
1284
1285 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
1286
1287 /* Tx FIFO should be empty! */
1288 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
1289 IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __FUNCTION__ );
1290 return 0;
1291 }
1292
1293 /* Fill FIFO with current frame */
1294 while ((fifo_size-- > 0) && (actual < len)) {
1295 /* Transmit next byte */
1296 outb(buf[actual], iobase+UART_TX);
1297
1298 actual++;
1299 }
1300
1301 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1302 return actual;
1303}
1304
1305/*
1306 * Function ali_ircc_net_open (dev)
1307 *
1308 * Start the device
1309 *
1310 */
1311static int ali_ircc_net_open(struct net_device *dev)
1312{
1313 struct ali_ircc_cb *self;
1314 int iobase;
1315 char hwname[32];
1316
1317 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
1318
1319 IRDA_ASSERT(dev != NULL, return -1;);
1320
1321 self = (struct ali_ircc_cb *) dev->priv;
1322
1323 IRDA_ASSERT(self != NULL, return 0;);
1324
1325 iobase = self->io.fir_base;
1326
1327 /* Request IRQ and install Interrupt Handler */
1328 if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
1329 {
1330 IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1331 self->io.irq);
1332 return -EAGAIN;
1333 }
1334
1335 /*
1336 * Always allocate the DMA channel after the IRQ, and clean up on
1337 * failure.
1338 */
1339 if (request_dma(self->io.dma, dev->name)) {
1340 IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1341 self->io.dma);
1342 free_irq(self->io.irq, self);
1343 return -EAGAIN;
1344 }
1345
1346	/* Turn on interrupts */
1347 outb(UART_IER_RDI , iobase+UART_IER);
1348
1349 /* Ready to play! */
1350 netif_start_queue(dev); //benjamin by irport
1351
1352 /* Give self a hardware name */
1353 sprintf(hwname, "ALI-FIR @ 0x%03x", self->io.fir_base);
1354
1355 /*
1356 * Open new IrLAP layer instance, now that everything should be
1357 * initialized properly
1358 */
1359 self->irlap = irlap_open(dev, &self->qos, hwname);
1360
1361 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1362
1363 return 0;
1364}
1365
1366/*
1367 * Function ali_ircc_net_close (dev)
1368 *
1369 * Stop the device
1370 *
1371 */
1372static int ali_ircc_net_close(struct net_device *dev)
1373{
1374
1375 struct ali_ircc_cb *self;
1376 //int iobase;
1377
1378 IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
1379
1380 IRDA_ASSERT(dev != NULL, return -1;);
1381
1382 self = (struct ali_ircc_cb *) dev->priv;
1383 IRDA_ASSERT(self != NULL, return 0;);
1384
1385 /* Stop device */
1386 netif_stop_queue(dev);
1387
1388 /* Stop and remove instance of IrLAP */
1389 if (self->irlap)
1390 irlap_close(self->irlap);
1391 self->irlap = NULL;
1392
1393 disable_dma(self->io.dma);
1394
1395 /* Disable interrupts */
1396 SetCOMInterrupts(self, FALSE);
1397
1398 free_irq(self->io.irq, dev);
1399 free_dma(self->io.dma);
1400
1401 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1402
1403 return 0;
1404}
1405
1406/*
1407 * Function ali_ircc_fir_hard_xmit (skb, dev)
1408 *
1409 * Transmit the frame
1410 *
1411 */
1412static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1413{
1414 struct ali_ircc_cb *self;
1415 unsigned long flags;
1416 int iobase;
1417 __u32 speed;
1418 int mtt, diff;
1419
1420 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
1421
1422 self = (struct ali_ircc_cb *) dev->priv;
1423 iobase = self->io.fir_base;
1424
1425 netif_stop_queue(dev);
1426
1427	/* Make sure tests & speed changes are atomic */
1428 spin_lock_irqsave(&self->lock, flags);
1429
1430 /* Note : you should make sure that speed changes are not going
1431 * to corrupt any outgoing frame. Look at nsc-ircc for the gory
1432 * details - Jean II */
1433
1434 /* Check if we need to change the speed */
1435 speed = irda_get_next_speed(skb);
1436 if ((speed != self->io.speed) && (speed != -1)) {
1437 /* Check for empty frame */
1438 if (!skb->len) {
1439 ali_ircc_change_speed(self, speed);
1440 dev->trans_start = jiffies;
1441 spin_unlock_irqrestore(&self->lock, flags);
1442 dev_kfree_skb(skb);
1443 return 0;
1444 } else
1445 self->new_speed = speed;
1446 }
1447
1448 /* Register and copy this frame to DMA memory */
1449 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
1450 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
1451 self->tx_fifo.tail += skb->len;
1452
1453 self->stats.tx_bytes += skb->len;
1454
1455 memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
1456 skb->len);
1457
1458 self->tx_fifo.len++;
1459 self->tx_fifo.free++;
1460
1461 /* Start transmit only if there is currently no transmit going on */
1462 if (self->tx_fifo.len == 1)
1463 {
1464 /* Check if we must wait the min turn time or not */
1465 mtt = irda_get_mtt(skb);
1466
1467 if (mtt)
1468 {
1469 /* Check how much time we have used already */
1470 do_gettimeofday(&self->now);
1471
1472 diff = self->now.tv_usec - self->stamp.tv_usec;
1473 /* self->stamp is set from ali_ircc_dma_receive_complete() */
1474
1475 IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __FUNCTION__ , diff);
1476
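			/* tv_usec wrapped past a one-second boundary */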
1477 if (diff < 0)
1478 diff += 1000000;
1479
1480 /* Check if the mtt is larger than the time we have
1481 * already used by all the protocol processing
1482 */
1483 if (mtt > diff)
1484 {
1485 mtt -= diff;
1486
1487 /*
1488				 * Use the timer if the delay is larger than 500 us,
1489				 * and udelay() for smaller values, which should be
1490				 * acceptable
1491 */
1492 if (mtt > 500)
1493 {
1494 /* Adjust for timer resolution */
1495					mtt = (mtt+250) / 500; /* Convert to 500 us timer units, rounding to nearest */
1496
1497 IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __FUNCTION__ , mtt);
1498
1499 /* Setup timer */
1500 if (mtt == 1) /* 500 us */
1501 {
1502 switch_bank(iobase, BANK1);
1503 outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR);
1504 }
1505 else if (mtt == 2) /* 1 ms */
1506 {
1507 switch_bank(iobase, BANK1);
1508 outb(TIMER_IIR_1ms, iobase+FIR_TIMER_IIR);
1509 }
1510 else /* > 2ms -> 4ms */
1511 {
1512 switch_bank(iobase, BANK1);
1513 outb(TIMER_IIR_2ms, iobase+FIR_TIMER_IIR);
1514 }
1515
1516
1517 /* Start timer */
1518 outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
1519 self->io.direction = IO_XMIT;
1520
1521 /* Enable timer interrupt */
1522 self->ier = IER_TIMER;
1523 SetCOMInterrupts(self, TRUE);
1524
1525 /* Timer will take care of the rest */
1526 goto out;
1527 }
1528 else
1529 udelay(mtt);
1530		} // if (mtt > diff)
1531 }// if (mtt)
1532
1533 /* Enable EOM interrupt */
1534 self->ier = IER_EOM;
1535 SetCOMInterrupts(self, TRUE);
1536
1537 /* Transmit frame */
1538 ali_ircc_dma_xmit(self);
1539 } // if (self->tx_fifo.len == 1)
1540
1541 out:
1542
1543 /* Not busy transmitting anymore if window is not full */
1544 if (self->tx_fifo.free < MAX_TX_WINDOW)
1545 netif_wake_queue(self->netdev);
1546
1547 /* Restore bank register */
1548 switch_bank(iobase, BANK0);
1549
1550 dev->trans_start = jiffies;
1551 spin_unlock_irqrestore(&self->lock, flags);
1552 dev_kfree_skb(skb);
1553
1554 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1555 return 0;
1556}
1557
1558
1559static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1560{
1561 int iobase, tmp;
1562 unsigned char FIFO_OPTI, Hi, Lo;
1563
1564
1565 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
1566
1567 iobase = self->io.fir_base;
1568
1569 /* FIFO threshold , this method comes from NDIS5 code */
1570
1571 if(self->tx_fifo.queue[self->tx_fifo.ptr].len < TX_FIFO_Threshold)
1572 FIFO_OPTI = self->tx_fifo.queue[self->tx_fifo.ptr].len-1;
1573 else
1574 FIFO_OPTI = TX_FIFO_Threshold;
1575
1576 /* Disable DMA */
1577 switch_bank(iobase, BANK1);
1578 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
1579
1580 self->io.direction = IO_XMIT;
1581
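	/* The DMA address is the Tx buffer's bus handle plus the offset of
	 * this queue entry within the coherent buffer */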
1582 irda_setup_dma(self->io.dma,
1583 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
1584 self->tx_buff.head) + self->tx_buff_dma,
1585 self->tx_fifo.queue[self->tx_fifo.ptr].len,
1586 DMA_TX_MODE);
1587
1588 /* Reset Tx FIFO */
1589 switch_bank(iobase, BANK0);
1590 outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
1591
1592 /* Set Tx FIFO threshold */
1593 if (self->fifo_opti_buf!=FIFO_OPTI)
1594 {
1595 switch_bank(iobase, BANK1);
1596 outb(FIFO_OPTI, iobase+FIR_FIFO_TR) ;
1597 self->fifo_opti_buf=FIFO_OPTI;
1598 }
1599
1600 /* Set Tx DMA threshold */
1601 switch_bank(iobase, BANK1);
1602 outb(TX_DMA_Threshold, iobase+FIR_DMA_TR);
1603
1604 /* Set max Tx frame size */
1605 Hi = (self->tx_fifo.queue[self->tx_fifo.ptr].len >> 8) & 0x0f;
1606 Lo = self->tx_fifo.queue[self->tx_fifo.ptr].len & 0xff;
1607 switch_bank(iobase, BANK2);
1608 outb(Hi, iobase+FIR_TX_DSR_HI);
1609 outb(Lo, iobase+FIR_TX_DSR_LO);
1610
1611	/* Disable SIP, disable Brick Wall (not supported in TX mode), change to TX mode */
1612 switch_bank(iobase, BANK0);
1613 tmp = inb(iobase+FIR_LCR_B);
1614 tmp &= ~0x20; // Disable SIP
1615 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
1616 IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __FUNCTION__ , inb(iobase+FIR_LCR_B));
1617
1618 outb(0, iobase+FIR_LSR);
1619
1620 /* Enable DMA and Burst Mode */
1621 switch_bank(iobase, BANK1);
1622 outb(inb(iobase+FIR_CR) | CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
1623
1624 switch_bank(iobase, BANK0);
1625
1626 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1627}
1628
1629static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
1630{
1631 int iobase;
1632 int ret = TRUE;
1633
1634 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
1635
1636 iobase = self->io.fir_base;
1637
1638 /* Disable DMA */
1639 switch_bank(iobase, BANK1);
1640 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
1641
1642 /* Check for underrun! */
1643 switch_bank(iobase, BANK0);
1644 if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
1645
1646 {
1647 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __FUNCTION__);
1648 self->stats.tx_errors++;
1649 self->stats.tx_fifo_errors++;
1650 }
1651 else
1652 {
1653 self->stats.tx_packets++;
1654 }
1655
1656 /* Check if we need to change the speed */
1657 if (self->new_speed)
1658 {
1659 ali_ircc_change_speed(self, self->new_speed);
1660 self->new_speed = 0;
1661 }
1662
1663 /* Finished with this frame, so prepare for next */
1664 self->tx_fifo.ptr++;
1665 self->tx_fifo.len--;
1666
1667 /* Any frames to be sent back-to-back? */
1668 if (self->tx_fifo.len)
1669 {
1670 ali_ircc_dma_xmit(self);
1671
1672 /* Not finished yet! */
1673 ret = FALSE;
1674 }
1675 else
1676 { /* Reset Tx FIFO info */
1677 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1678 self->tx_fifo.tail = self->tx_buff.head;
1679 }
1680
1681 /* Make sure we have room for more frames */
1682 if (self->tx_fifo.free < MAX_TX_WINDOW) {
1683 /* Not busy transmitting anymore */
1684 /* Tell the network layer, that we can accept more frames */
1685 netif_wake_queue(self->netdev);
1686 }
1687
1688 switch_bank(iobase, BANK0);
1689
1690 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1691 return ret;
1692}
1693
1694/*
1695 * Function ali_ircc_dma_receive (self)
1696 *
1697 * Get ready for receiving a frame. The device will initiate a DMA
1698 * if it starts to receive a frame.
1699 *
1700 */
1701static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1702{
1703 int iobase, tmp;
1704
1705 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
1706
1707 iobase = self->io.fir_base;
1708
1709 /* Reset Tx FIFO info */
1710 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1711 self->tx_fifo.tail = self->tx_buff.head;
1712
1713 /* Disable DMA */
1714 switch_bank(iobase, BANK1);
1715 outb(inb(iobase+FIR_CR) & ~CR_DMA_EN, iobase+FIR_CR);
1716
1717 /* Reset Message Count */
1718 switch_bank(iobase, BANK0);
1719 outb(0x07, iobase+FIR_LSR);
1720
1721 self->rcvFramesOverflow = FALSE;
1722
1723 self->LineStatus = inb(iobase+FIR_LSR) ;
1724
1725 /* Reset Rx FIFO info */
1726 self->io.direction = IO_RECV;
1727 self->rx_buff.data = self->rx_buff.head;
1728
1729 /* Reset Rx FIFO */
1730 // switch_bank(iobase, BANK0);
1731 outb(LCR_A_FIFO_RESET, iobase+FIR_LCR_A);
1732
1733 self->st_fifo.len = self->st_fifo.pending_bytes = 0;
1734 self->st_fifo.tail = self->st_fifo.head = 0;
1735
1736 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
1737 DMA_RX_MODE);
1738
1739 /* Set Receive Mode,Brick Wall */
1740 //switch_bank(iobase, BANK0);
1741 tmp = inb(iobase+FIR_LCR_B);
1742 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
1743 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __FUNCTION__ , inb(iobase+FIR_LCR_B));
1744
1745 /* Set Rx Threshold */
1746 switch_bank(iobase, BANK1);
1747 outb(RX_FIFO_Threshold, iobase+FIR_FIFO_TR);
1748 outb(RX_DMA_Threshold, iobase+FIR_DMA_TR);
1749
1750 /* Enable DMA and Burst Mode */
1751 // switch_bank(iobase, BANK1);
1752 outb(CR_DMA_EN | CR_DMA_BURST, iobase+FIR_CR);
1753
1754 switch_bank(iobase, BANK0);
1755 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1756 return 0;
1757}
1758
1759static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1760{
1761 struct st_fifo *st_fifo;
1762 struct sk_buff *skb;
1763 __u8 status, MessageCount;
1764 int len, i, iobase, val;
1765
1766 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
1767
1768 st_fifo = &self->st_fifo;
1769 iobase = self->io.fir_base;
1770
1771 switch_bank(iobase, BANK0);
1772 MessageCount = inb(iobase+ FIR_LSR)&0x07;
1773
1774 if (MessageCount > 0)
1775		IRDA_DEBUG(0, "%s(), Message count = %d,\n", __FUNCTION__ , MessageCount);
1776
1777 for (i=0; i<=MessageCount; i++)
1778 {
1779 /* Bank 0 */
1780 switch_bank(iobase, BANK0);
1781 status = inb(iobase+FIR_LSR);
1782
1783 switch_bank(iobase, BANK2);
1784 len = inb(iobase+FIR_RX_DSR_HI) & 0x0f;
1785 len = len << 8;
1786 len |= inb(iobase+FIR_RX_DSR_LO);
1787
1788 IRDA_DEBUG(1, "%s(), RX Length = 0x%.2x,\n", __FUNCTION__ , len);
1789 IRDA_DEBUG(1, "%s(), RX Status = 0x%.2x,\n", __FUNCTION__ , status);
1790
1791 if (st_fifo->tail >= MAX_RX_WINDOW) {
1792 IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__ );
1793 continue;
1794 }
1795
1796 st_fifo->entries[st_fifo->tail].status = status;
1797 st_fifo->entries[st_fifo->tail].len = len;
1798 st_fifo->pending_bytes += len;
1799 st_fifo->tail++;
1800 st_fifo->len++;
1801 }
1802
1803 for (i=0; i<=MessageCount; i++)
1804 {
1805 /* Get first entry */
1806 status = st_fifo->entries[st_fifo->head].status;
1807 len = st_fifo->entries[st_fifo->head].len;
1808 st_fifo->pending_bytes -= len;
1809 st_fifo->head++;
1810 st_fifo->len--;
1811
1812 /* Check for errors */
1813 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
1814 {
1815 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __FUNCTION__ );
1816
1817 /* Skip frame */
1818 self->stats.rx_errors++;
1819
1820 self->rx_buff.data += len;
1821
1822 if (status & LSR_FIFO_UR)
1823 {
1824 self->stats.rx_frame_errors++;
1825 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __FUNCTION__ );
1826 }
1827 if (status & LSR_FRAME_ERROR)
1828 {
1829 self->stats.rx_frame_errors++;
1830 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __FUNCTION__ );
1831 }
1832
1833 if (status & LSR_CRC_ERROR)
1834 {
1835 self->stats.rx_crc_errors++;
1836 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __FUNCTION__ );
1837 }
1838
1839 if(self->rcvFramesOverflow)
1840 {
1841 self->stats.rx_frame_errors++;
1842 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __FUNCTION__ );
1843 }
1844 if(len == 0)
1845 {
1846 self->stats.rx_frame_errors++;
1847 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __FUNCTION__ );
1848 }
1849 }
1850 else
1851 {
1852
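			/* If only a few bytes are pending and the chip FIFO is still not
			 * empty, the DMA for this frame has probably not finished; in that
			 * case the entry is put back and a 500 us timer retry is armed below. */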
1853 if (st_fifo->pending_bytes < 32)
1854 {
1855 switch_bank(iobase, BANK0);
1856 val = inb(iobase+FIR_BSR);
1857 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
1858 {
1859 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __FUNCTION__ );
1860
1861 /* Put this entry back in fifo */
1862 st_fifo->head--;
1863 st_fifo->len++;
1864 st_fifo->pending_bytes += len;
1865 st_fifo->entries[st_fifo->head].status = status;
1866 st_fifo->entries[st_fifo->head].len = len;
1867
1868 /*
1869 * DMA not finished yet, so try again
1870 * later, set timer value, resolution
1871 * 500 us
1872 */
1873
1874 switch_bank(iobase, BANK1);
1875 outb(TIMER_IIR_500, iobase+FIR_TIMER_IIR); // 2001/1/2 05:07PM
1876
1877 /* Enable Timer */
1878 outb(inb(iobase+FIR_CR) | CR_TIMER_EN, iobase+FIR_CR);
1879
1880 return FALSE; /* I'll be back! */
1881 }
1882 }
1883
1884 /*
1885 * Remember the time we received this frame, so we can
1886 * reduce the min turn time a bit since we will know
1887 * how much time we have used for protocol processing
1888 */
1889 do_gettimeofday(&self->stamp);
1890
1891 skb = dev_alloc_skb(len+1);
1892 if (skb == NULL)
1893 {
1894 IRDA_WARNING("%s(), memory squeeze, "
1895 "dropping frame.\n",
1896 __FUNCTION__);
1897 self->stats.rx_dropped++;
1898
1899 return FALSE;
1900 }
1901
1902 /* Make sure IP header gets aligned */
1903 skb_reserve(skb, 1);
1904
1905 /* Copy frame without CRC, CRC is removed by hardware*/
1906 skb_put(skb, len);
1907 memcpy(skb->data, self->rx_buff.data, len);
1908
1909 /* Move to next frame */
1910 self->rx_buff.data += len;
1911 self->stats.rx_bytes += len;
1912 self->stats.rx_packets++;
1913
1914 skb->dev = self->netdev;
1915 skb->mac.raw = skb->data;
1916 skb->protocol = htons(ETH_P_IRDA);
1917 netif_rx(skb);
1918 self->netdev->last_rx = jiffies;
1919 }
1920 }
1921
1922 switch_bank(iobase, BANK0);
1923
1924 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1925 return TRUE;
1926}
1927
1928
1929
1930/*
1931 * Function ali_ircc_sir_hard_xmit (skb, dev)
1932 *
1933 * Transmit the frame!
1934 *
1935 */
1936static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1937{
1938 struct ali_ircc_cb *self;
1939 unsigned long flags;
1940 int iobase;
1941 __u32 speed;
1942
1943 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
1944
1945 IRDA_ASSERT(dev != NULL, return 0;);
1946
1947 self = (struct ali_ircc_cb *) dev->priv;
1948 IRDA_ASSERT(self != NULL, return 0;);
1949
1950 iobase = self->io.sir_base;
1951
1952 netif_stop_queue(dev);
1953
1954	/* Make sure tests & speed changes are atomic */
1955 spin_lock_irqsave(&self->lock, flags);
1956
1957 /* Note : you should make sure that speed changes are not going
1958 * to corrupt any outgoing frame. Look at nsc-ircc for the gory
1959 * details - Jean II */
1960
1961 /* Check if we need to change the speed */
1962 speed = irda_get_next_speed(skb);
1963 if ((speed != self->io.speed) && (speed != -1)) {
1964 /* Check for empty frame */
1965 if (!skb->len) {
1966 ali_ircc_change_speed(self, speed);
1967 dev->trans_start = jiffies;
1968 spin_unlock_irqrestore(&self->lock, flags);
1969 dev_kfree_skb(skb);
1970 return 0;
1971 } else
1972 self->new_speed = speed;
1973 }
1974
1975 /* Init tx buffer */
1976 self->tx_buff.data = self->tx_buff.head;
1977
1978 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
1979 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
1980 self->tx_buff.truesize);
1981
1982 self->stats.tx_bytes += self->tx_buff.len;
1983
1984 /* Turn on transmit finished interrupt. Will fire immediately! */
1985 outb(UART_IER_THRI, iobase+UART_IER);
1986
1987 dev->trans_start = jiffies;
1988 spin_unlock_irqrestore(&self->lock, flags);
1989
1990 dev_kfree_skb(skb);
1991
1992 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
1993
1994 return 0;
1995}
1996
1997
1998/*
1999 * Function ali_ircc_net_ioctl (dev, rq, cmd)
2000 *
2001 * Process IOCTL commands for this device
2002 *
2003 */
2004static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2005{
2006 struct if_irda_req *irq = (struct if_irda_req *) rq;
2007 struct ali_ircc_cb *self;
2008 unsigned long flags;
2009 int ret = 0;
2010
2011 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
2012
2013 IRDA_ASSERT(dev != NULL, return -1;);
2014
2015 self = dev->priv;
2016
2017 IRDA_ASSERT(self != NULL, return -1;);
2018
2019 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);
2020
2021 switch (cmd) {
2022 case SIOCSBANDWIDTH: /* Set bandwidth */
2023 IRDA_DEBUG(1, "%s(), SIOCSBANDWIDTH\n", __FUNCTION__ );
2024 /*
2025 * This function will also be used by IrLAP to change the
2026 * speed, so we still must allow for speed change within
2027 * interrupt context.
2028 */
2029 if (!in_interrupt() && !capable(CAP_NET_ADMIN))
2030 return -EPERM;
2031
2032 spin_lock_irqsave(&self->lock, flags);
2033 ali_ircc_change_speed(self, irq->ifr_baudrate);
2034 spin_unlock_irqrestore(&self->lock, flags);
2035 break;
2036 case SIOCSMEDIABUSY: /* Set media busy */
2037 IRDA_DEBUG(1, "%s(), SIOCSMEDIABUSY\n", __FUNCTION__ );
2038 if (!capable(CAP_NET_ADMIN))
2039 return -EPERM;
2040 irda_device_set_media_busy(self->netdev, TRUE);
2041 break;
2042 case SIOCGRECEIVING: /* Check if we are receiving right now */
2043 IRDA_DEBUG(2, "%s(), SIOCGRECEIVING\n", __FUNCTION__ );
2044 /* This is protected */
2045 irq->ifr_receiving = ali_ircc_is_receiving(self);
2046 break;
2047 default:
2048 ret = -EOPNOTSUPP;
2049 }
2050
2051 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2052
2053 return ret;
2054}
2055
2056/*
2057 * Function ali_ircc_is_receiving (self)
2058 *
2059 * Return TRUE if we are currently receiving a frame
2060 *
2061 */
2062static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2063{
2064 unsigned long flags;
2065 int status = FALSE;
2066 int iobase;
2067
2068 IRDA_DEBUG(2, "%s(), ---------------- Start -----------------\n", __FUNCTION__ );
2069
2070 IRDA_ASSERT(self != NULL, return FALSE;);
2071
2072 spin_lock_irqsave(&self->lock, flags);
2073
2074 if (self->io.speed > 115200)
2075 {
2076 iobase = self->io.fir_base;
2077
2078 switch_bank(iobase, BANK1);
2079 if((inb(iobase+FIR_FIFO_FR) & 0x3f) != 0)
2080 {
2081 /* We are receiving something */
2082 IRDA_DEBUG(1, "%s(), We are receiving something\n", __FUNCTION__ );
2083 status = TRUE;
2084 }
2085 switch_bank(iobase, BANK0);
2086 }
2087 else
2088 {
2089 status = (self->rx_buff.state != OUTSIDE_FRAME);
2090 }
2091
2092 spin_unlock_irqrestore(&self->lock, flags);
2093
2094 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2095
2096 return status;
2097}
2098
2099static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
2100{
2101 struct ali_ircc_cb *self = (struct ali_ircc_cb *) dev->priv;
2102
2103 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
2104
2105 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2106
2107 return &self->stats;
2108}
2109
2110static void ali_ircc_suspend(struct ali_ircc_cb *self)
2111{
2112 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
2113
2114 IRDA_MESSAGE("%s, Suspending\n", driver_name);
2115
2116 if (self->io.suspended)
2117 return;
2118
2119 ali_ircc_net_close(self->netdev);
2120
2121 self->io.suspended = 1;
2122
2123 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2124}
2125
2126static void ali_ircc_wakeup(struct ali_ircc_cb *self)
2127{
2128 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
2129
2130 if (!self->io.suspended)
2131 return;
2132
2133 ali_ircc_net_open(self->netdev);
2134
2135 IRDA_MESSAGE("%s, Waking up\n", driver_name);
2136
2137 self->io.suspended = 0;
2138
2139 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2140}
2141
2142static int ali_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
2143{
2144 struct ali_ircc_cb *self = (struct ali_ircc_cb*) dev->data;
2145
2146 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
2147
2148 if (self) {
2149 switch (rqst) {
2150 case PM_SUSPEND:
2151 ali_ircc_suspend(self);
2152 break;
2153 case PM_RESUME:
2154 ali_ircc_wakeup(self);
2155 break;
2156 }
2157 }
2158
2159 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2160
2161 return 0;
2162}
2163
2164
2165/* ALi Chip Function */
2166
2167static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
2168{
2169
2170 unsigned char newMask;
2171
2172 int iobase = self->io.fir_base; /* or sir_base */
2173
2174 IRDA_DEBUG(2, "%s(), -------- Start -------- ( Enable = %d )\n", __FUNCTION__ , enable);
2175
2176 /* Enable the interrupt which we wish to */
2177 if (enable){
2178 if (self->io.direction == IO_XMIT)
2179 {
2180 if (self->io.speed > 115200) /* FIR, MIR */
2181 {
2182 newMask = self->ier;
2183 }
2184 else /* SIR */
2185 {
2186 newMask = UART_IER_THRI | UART_IER_RDI;
2187 }
2188 }
2189 else {
2190 if (self->io.speed > 115200) /* FIR, MIR */
2191 {
2192 newMask = self->ier;
2193 }
2194 else /* SIR */
2195 {
2196 newMask = UART_IER_RDI;
2197 }
2198 }
2199 }
2200 else /* Disable all the interrupts */
2201 {
2202 newMask = 0x00;
2203
2204 }
2205
2206	//SIR and FIR have different registers
2207 if (self->io.speed > 115200)
2208 {
2209 switch_bank(iobase, BANK0);
2210 outb(newMask, iobase+FIR_IER);
2211 }
2212 else
2213 outb(newMask, iobase+UART_IER);
2214
2215 IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2216}
2217
2218static void SIR2FIR(int iobase)
2219{
2220 //unsigned char tmp;
2221
2222 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
2223
2224 /* Already protected (change_speed() or setup()), no need to lock.
2225 * Jean II */
2226
2227 outb(0x28, iobase+UART_MCR);
2228 outb(0x68, iobase+UART_MCR);
2229 outb(0x88, iobase+UART_MCR);
2230
2231 outb(0x60, iobase+FIR_MCR); /* Master Reset */
2232 outb(0x20, iobase+FIR_MCR); /* Master Interrupt Enable */
2233
2234 //tmp = inb(iobase+FIR_LCR_B); /* SIP enable */
2235 //tmp |= 0x20;
2236 //outb(tmp, iobase+FIR_LCR_B);
2237
2238 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2239}
2240
2241static void FIR2SIR(int iobase)
2242{
2243 unsigned char val;
2244
2245 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__ );
2246
2247 /* Already protected (change_speed() or setup()), no need to lock.
2248 * Jean II */
2249
2250 outb(0x20, iobase+FIR_MCR); /* IRQ to low */
2251 outb(0x00, iobase+UART_IER);
2252
2253 outb(0xA0, iobase+FIR_MCR); /* Don't set master reset */
2254 outb(0x00, iobase+UART_FCR);
2255 outb(0x07, iobase+UART_FCR);
2256
2257 val = inb(iobase+UART_RX);
2258 val = inb(iobase+UART_LSR);
2259 val = inb(iobase+UART_MSR);
2260
2261 IRDA_DEBUG(1, "%s(), ----------------- End ------------------\n", __FUNCTION__ );
2262}
2263
2264MODULE_AUTHOR("Benjamin Kong <benjamin_kong@ali.com.tw>");
2265MODULE_DESCRIPTION("ALi FIR Controller Driver");
2266MODULE_LICENSE("GPL");
2267
2268
2269module_param_array(io, int, NULL, 0);
2270MODULE_PARM_DESC(io, "Base I/O addresses");
2271module_param_array(irq, int, NULL, 0);
2272MODULE_PARM_DESC(irq, "IRQ lines");
2273module_param_array(dma, int, NULL, 0);
2274MODULE_PARM_DESC(dma, "DMA channels");
2275
2276module_init(ali_ircc_init);
2277module_exit(ali_ircc_cleanup);
diff --git a/drivers/net/irda/ali-ircc.h b/drivers/net/irda/ali-ircc.h
new file mode 100644
index 000000000000..e489c6661ee8
--- /dev/null
+++ b/drivers/net/irda/ali-ircc.h
@@ -0,0 +1,231 @@
1/*********************************************************************
2 *
3 * Filename: ali-ircc.h
4 * Version: 0.5
5 * Description: Driver for the ALI M1535D and M1543C FIR Controller
6 * Status: Experimental.
7 * Author: Benjamin Kong <benjamin_kong@ali.com.tw>
8 * Created at: 2000/10/16 03:46PM
9 * Modified at: 2001/1/3 02:56PM
10 * Modified by: Benjamin Kong <benjamin_kong@ali.com.tw>
11 *
12 * Copyright (c) 2000 Benjamin Kong <benjamin_kong@ali.com.tw>
13 * All Rights Reserved
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 ********************************************************************/
21
22#ifndef ALI_IRCC_H
23#define ALI_IRCC_H
24
25#include <linux/time.h>
26
27#include <linux/spinlock.h>
28#include <linux/pm.h>
29#include <linux/types.h>
30#include <asm/io.h>
31
32/* SIR Register */
33/* Uses the definitions from linux/serial_reg.h */
34
35/* FIR Register */
36#define BANK0 0x20
37#define BANK1 0x21
38#define BANK2 0x22
39#define BANK3 0x23
40
41#define FIR_MCR 0x07 /* Master Control Register */
42
43/* Bank 0 */
44#define FIR_DR 0x00 /* Alias 0, FIR Data Register (R/W) */
45#define FIR_IER 0x01 /* Alias 1, FIR Interrupt Enable Register (R/W) */
46#define FIR_IIR 0x02 /* Alias 2, FIR Interrupt Identification Register (Read only) */
47#define FIR_LCR_A 0x03 /* Alias 3, FIR Line Control Register A (R/W) */
48#define FIR_LCR_B 0x04 /* Alias 4, FIR Line Control Register B (R/W) */
49#define FIR_LSR 0x05 /* Alias 5, FIR Line Status Register (R/W) */
50#define FIR_BSR 0x06 /* Alias 6, FIR Bus Status Register (Read only) */
51
52
53 /* Alias 1 */
54 #define IER_FIFO 0x10 /* FIR FIFO Interrupt Enable */
55 #define IER_TIMER 0x20 /* Timer Interrupt Enable */
56 #define IER_EOM 0x40 /* End of Message Interrupt Enable */
57 #define IER_ACT 0x80 /* Active Frame Interrupt Enable */
58
59 /* Alias 2 */
60 #define IIR_FIFO 0x10 /* FIR FIFO Interrupt */
61 #define IIR_TIMER 0x20 /* Timer Interrupt */
62 #define IIR_EOM 0x40 /* End of Message Interrupt */
63 #define IIR_ACT 0x80 /* Active Frame Interrupt */
64
65 /* Alias 3 */
66 #define LCR_A_FIFO_RESET 0x80 /* FIFO Reset */
67
68 /* Alias 4 */
69 #define LCR_B_BW 0x10 /* Brick Wall */
70 #define LCR_B_SIP 0x20 /* SIP Enable */
71 #define LCR_B_TX_MODE 0x40 /* Transmit Mode */
72 #define LCR_B_RX_MODE 0x80 /* Receive Mode */
73
74 /* Alias 5 */
75 #define LSR_FIR_LSA 0x00 /* FIR Line Status Address */
76 #define LSR_FRAME_ABORT 0x08 /* Frame Abort */
77 #define LSR_CRC_ERROR 0x10 /* CRC Error */
78 #define LSR_SIZE_ERROR 0x20 /* Size Error */
79 #define LSR_FRAME_ERROR 0x40 /* Frame Error */
80 #define LSR_FIFO_UR 0x80 /* FIFO Underrun */
81 #define LSR_FIFO_OR 0x80 /* FIFO Overrun */
82
83 /* Alias 6 */
84 #define BSR_FIFO_NOT_EMPTY 0x80 /* FIFO Not Empty */
85
86/* Bank 1 */
87#define FIR_CR 0x00 /* Alias 0, FIR Configuration Register (R/W) */
88#define FIR_FIFO_TR 0x01 /* Alias 1, FIR FIFO Threshold Register (R/W) */
89#define FIR_DMA_TR 0x02 /* Alias 2, FIR DMA Threshold Register (R/W) */
90#define FIR_TIMER_IIR 0x03 /* Alias 3, FIR Timer interrupt interval register (W/O) */
91#define FIR_FIFO_FR 0x03 /* Alias 3, FIR FIFO Flag register (R/O) */
92#define FIR_FIFO_RAR 0x04 /* Alias 4, FIR FIFO Read Address register (R/O) */
93#define FIR_FIFO_WAR 0x05 /* Alias 5, FIR FIFO Write Address register (R/O) */
94#define FIR_TR		0x06	/* Alias 6, Test Register (W/O) */
95
96 /* Alias 0 */
97 #define CR_DMA_EN 0x01 /* DMA Enable */
98 #define CR_DMA_BURST 0x02 /* DMA Burst Mode */
99 #define CR_TIMER_EN 0x08 /* Timer Enable */
100
101 /* Alias 3 */
102 #define TIMER_IIR_500 0x00 /* 500 us */
103 #define TIMER_IIR_1ms 0x01 /* 1 ms */
104 #define TIMER_IIR_2ms 0x02 /* 2 ms */
105 #define TIMER_IIR_4ms 0x03 /* 4 ms */
106
107/* Bank 2 */
108#define FIR_IRDA_CR 0x00 /* Alias 0, IrDA Control Register (R/W) */
109#define FIR_BOF_CR 0x01 /* Alias 1, BOF Count Register (R/W) */
110#define FIR_BW_CR 0x02 /* Alias 2, Brick Wall Count Register (R/W) */
111#define FIR_TX_DSR_HI 0x03 /* Alias 3, TX Data Size Register (high) (R/W) */
112#define FIR_TX_DSR_LO 0x04 /* Alias 4, TX Data Size Register (low) (R/W) */
113#define FIR_RX_DSR_HI 0x05 /* Alias 5, RX Data Size Register (high) (R/W) */
114#define FIR_RX_DSR_LO 0x06 /* Alias 6, RX Data Size Register (low) (R/W) */
115
116 /* Alias 0 */
117 #define IRDA_CR_HDLC1152 0x80 /* 1.152Mbps HDLC Select */
118 #define IRDA_CR_CRC 0X40 /* CRC Select. */
119 #define IRDA_CR_HDLC 0x20 /* HDLC select. */
120 #define IRDA_CR_HP_MODE 0x10 /* HP mode (read only) */
121 #define IRDA_CR_SD_ST 0x08 /* SD/MODE State. */
122 #define IRDA_CR_FIR_SIN 0x04 /* FIR SIN Select. */
123 #define IRDA_CR_ITTX_0 0x02 /* SOUT State. IRTX force to 0 */
124 #define IRDA_CR_ITTX_1 0x03 /* SOUT State. IRTX force to 1 */
125
126/* Bank 3 */
127#define FIR_ID_VR 0x00 /* Alias 0, FIR ID Version Register (R/O) */
128#define FIR_MODULE_CR 0x01 /* Alias 1, FIR Module Control Register (R/W) */
129#define FIR_IO_BASE_HI 0x02 /* Alias 2, FIR Higher I/O Base Address Register (R/O) */
130#define FIR_IO_BASE_LO 0x03 /* Alias 3, FIR Lower I/O Base Address Register (R/O) */
131#define FIR_IRQ_CR 0x04 /* Alias 4, FIR IRQ Channel Register (R/O) */
132#define FIR_DMA_CR 0x05 /* Alias 5, FIR DMA Channel Register (R/O) */
133
134struct ali_chip {
135 char *name;
136 int cfg[2];
137 unsigned char entr1;
138 unsigned char entr2;
139 unsigned char cid_index;
140 unsigned char cid_value;
141 int (*probe)(struct ali_chip *chip, chipio_t *info);
142 int (*init)(struct ali_chip *chip, chipio_t *info);
143};
144typedef struct ali_chip ali_chip_t;
145
146
147/* DMA modes needed */
148#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
149#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
150
151#define MAX_TX_WINDOW 7
152#define MAX_RX_WINDOW 7
153
154#define TX_FIFO_Threshold 8
155#define RX_FIFO_Threshold 1
156#define TX_DMA_Threshold 1
157#define RX_DMA_Threshold 1
158
159/* For storing entries in the status FIFO */
160
161struct st_fifo_entry {
162 int status;
163 int len;
164};
165
166struct st_fifo {
167 struct st_fifo_entry entries[MAX_RX_WINDOW];
168 int pending_bytes;
169 int head;
170 int tail;
171 int len;
172};
173
174struct frame_cb {
175 void *start; /* Start of frame in DMA mem */
176	int len;		/* Length of frame in DMA mem */
177};
178
179struct tx_fifo {
180 struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
181 int ptr; /* Currently being sent */
182	int len;		/* Length of queue */
183 int free; /* Next free slot */
184 void *tail; /* Next free start in DMA mem */
185};
186
187/* Private data for each instance */
188struct ali_ircc_cb {
189
190 struct st_fifo st_fifo; /* Info about received frames */
191 struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
192
193 struct net_device *netdev; /* Yes! we are some kind of netdevice */
194 struct net_device_stats stats;
195
196	struct irlap_cb *irlap;	/* The link layer we are bound to */
197 struct qos_info qos; /* QoS capabilities for this device */
198
199 chipio_t io; /* IrDA controller information */
200 iobuff_t tx_buff; /* Transmit buffer */
201 iobuff_t rx_buff; /* Receive buffer */
202 dma_addr_t tx_buff_dma;
203 dma_addr_t rx_buff_dma;
204
205 __u8 ier; /* Interrupt enable register */
206
207 __u8 InterruptID; /* Interrupt ID */
208 __u8 BusStatus; /* Bus Status */
209 __u8 LineStatus; /* Line Status */
210
211 unsigned char rcvFramesOverflow;
212
213 struct timeval stamp;
214 struct timeval now;
215
216 spinlock_t lock; /* For serializing operations */
217
218 __u32 new_speed;
219 int index; /* Instance index */
220
221 unsigned char fifo_opti_buf;
222
223 struct pm_dev *dev;
224};
225
226static inline void switch_bank(int iobase, int bank)
227{
228 outb(bank, iobase+FIR_MCR);
229}
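
/*
 * A minimal sketch of how the banked FIR registers above are accessed:
 * the bank index is written to FIR_MCR via switch_bank(), after which the
 * aliased offsets 0x00-0x06 refer to that bank.  The helper below is purely
 * illustrative (a hypothetical name, not used by the driver) and assumes
 * the caller already holds self->lock, as ali_ircc_is_receiving() does.
 */
static inline unsigned char ali_ircc_inb_banked(int iobase, int bank, int reg)
{
	unsigned char val;

	switch_bank(iobase, bank);	/* select the register bank */
	val = inb(iobase + reg);	/* read the aliased register */
	switch_bank(iobase, BANK0);	/* restore the default bank */

	return val;
}
/* e.g. ali_ircc_inb_banked(iobase, BANK1, FIR_FIFO_FR) reads the FIFO flags */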
230
231#endif /* ALI_IRCC_H */
diff --git a/drivers/net/irda/au1000_ircc.h b/drivers/net/irda/au1000_ircc.h
new file mode 100644
index 000000000000..7a31d4659ed6
--- /dev/null
+++ b/drivers/net/irda/au1000_ircc.h
@@ -0,0 +1,127 @@
1/*
2 *
3 * BRIEF MODULE DESCRIPTION
4 * Au1000 IrDA driver.
5 *
6 * Copyright 2001 MontaVista Software Inc.
7 * Author: MontaVista Software, Inc.
8 * ppopov@mvista.com or source@mvista.com
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
17 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
21 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
22 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 675 Mass Ave, Cambridge, MA 02139, USA.
29 */
30
31#ifndef AU1000_IRCC_H
32#define AU1000_IRCC_H
33
34#include <linux/time.h>
35
36#include <linux/spinlock.h>
37#include <linux/pm.h>
38#include <asm/io.h>
39
40#define NUM_IR_IFF 1
41#define NUM_IR_DESC 64
42#define RING_SIZE_4 0x0
43#define RING_SIZE_16 0x3
44#define RING_SIZE_64 0xF
45#define MAX_NUM_IR_DESC 64
46#define MAX_BUF_SIZE 2048
47
48#define BPS_115200 0
49#define BPS_57600 1
50#define BPS_38400 2
51#define BPS_19200 5
52#define BPS_9600 11
53#define BPS_2400 47
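/*
 * Note: these values mirror the SIR baud-rate divisors that
 * au1k_irda_set_speed() in au1k_ir.c writes into the PHY configuration
 * register (e.g. 11<<10 | 12<<5 for 9600 baud); the driver currently
 * writes the divisors literally rather than through these macros.
 */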
54
55/* Ring descriptor flags */
56#define AU_OWN (1<<7) /* tx,rx */
57
58#define IR_DIS_CRC (1<<6) /* tx */
59#define IR_BAD_CRC (1<<5) /* tx */
60#define IR_NEED_PULSE (1<<4) /* tx */
61#define IR_FORCE_UNDER (1<<3) /* tx */
62#define IR_DISABLE_TX (1<<2) /* tx */
63#define IR_HW_UNDER (1<<0) /* tx */
64#define IR_TX_ERROR (IR_DIS_CRC|IR_BAD_CRC|IR_HW_UNDER)
65
66#define IR_PHY_ERROR (1<<6) /* rx */
67#define IR_CRC_ERROR (1<<5) /* rx */
68#define IR_MAX_LEN (1<<4) /* rx */
69#define IR_FIFO_OVER (1<<3) /* rx */
70#define IR_SIR_ERROR (1<<2) /* rx */
71#define IR_RX_ERROR (IR_PHY_ERROR|IR_CRC_ERROR| \
72 IR_MAX_LEN|IR_FIFO_OVER|IR_SIR_ERROR)
73
74typedef struct db_dest {
75 struct db_dest *pnext;
76 volatile u32 *vaddr;
77 dma_addr_t dma_addr;
78} db_dest_t;
79
80
81typedef struct ring_desc {
82 u8 count_0; /* 7:0 */
83 u8 count_1; /* 12:8 */
84 u8 reserved;
85 u8 flags;
86 u8 addr_0; /* 7:0 */
87 u8 addr_1; /* 15:8 */
88 u8 addr_2; /* 23:16 */
89 u8 addr_3; /* 31:24 */
90} ring_dest_t;
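
/*
 * The descriptor above carries a 13-bit byte count (count_1:count_0) and a
 * 32-bit bus address split into four bytes.  A small illustrative helper
 * (hypothetical, not used by the driver -- au1k_irda_net_init() and
 * au1k_irda_hard_xmit() do this packing by hand) might look like:
 */
static inline void ring_desc_set(volatile ring_dest_t *d,
				 dma_addr_t addr, u32 count)
{
	d->count_0 = count & 0xff;		/* length bits 7:0 */
	d->count_1 = (count >> 8) & 0xff;	/* length bits 12:8 */
	d->addr_0 = addr & 0xff;		/* address bits 7:0 */
	d->addr_1 = (addr >> 8) & 0xff;		/* address bits 15:8 */
	d->addr_2 = (addr >> 16) & 0xff;	/* address bits 23:16 */
	d->addr_3 = (addr >> 24) & 0xff;	/* address bits 31:24 */
}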
91
92
93/* Private data for each instance */
94struct au1k_private {
95
96 db_dest_t *pDBfree;
97 db_dest_t db[2*NUM_IR_DESC];
98 volatile ring_dest_t *rx_ring[NUM_IR_DESC];
99 volatile ring_dest_t *tx_ring[NUM_IR_DESC];
100 db_dest_t *rx_db_inuse[NUM_IR_DESC];
101 db_dest_t *tx_db_inuse[NUM_IR_DESC];
102 u32 rx_head;
103 u32 tx_head;
104 u32 tx_tail;
105 u32 tx_full;
106
107 iobuff_t rx_buff;
108
109 struct net_device *netdev;
110 struct net_device_stats stats;
111
112 struct timeval stamp;
113 struct timeval now;
114 struct qos_info qos;
115 struct irlap_cb *irlap;
116
117 u8 open;
118 u32 speed;
119 u32 newspeed;
120
121 u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
122 struct timer_list timer;
123
124 spinlock_t lock; /* For serializing operations */
125 struct pm_dev *dev;
126};
127#endif /* AU1000_IRCC_H */
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
new file mode 100644
index 000000000000..e6b1985767c2
--- /dev/null
+++ b/drivers/net/irda/au1k_ir.c
@@ -0,0 +1,851 @@
1/*
2 * Alchemy Semi Au1000 IrDA driver
3 *
4 * Copyright 2001 MontaVista Software Inc.
5 * Author: MontaVista Software, Inc.
6 * ppopov@mvista.com or source@mvista.com
7 *
8 * This program is free software; you can distribute it and/or modify it
9 * under the terms of the GNU General Public License (Version 2) as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
20 */
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/init.h>
25#include <linux/errno.h>
26#include <linux/netdevice.h>
27#include <linux/slab.h>
28#include <linux/rtnetlink.h>
29#include <linux/interrupt.h>
30#include <linux/pm.h>
31#include <linux/bitops.h>
32
33#include <asm/irq.h>
34#include <asm/io.h>
35#include <asm/au1000.h>
36#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100)
37#include <asm/pb1000.h>
38#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
39#include <asm/db1x00.h>
40#else
41#error au1k_ir: unsupported board
42#endif
43
44#include <net/irda/irda.h>
45#include <net/irda/irmod.h>
46#include <net/irda/wrapper.h>
47#include <net/irda/irda_device.h>
48#include "au1000_ircc.h"
49
50static int au1k_irda_net_init(struct net_device *);
51static int au1k_irda_start(struct net_device *);
52static int au1k_irda_stop(struct net_device *dev);
53static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
54static int au1k_irda_rx(struct net_device *);
55static void au1k_irda_interrupt(int, void *, struct pt_regs *);
56static void au1k_tx_timeout(struct net_device *);
57static struct net_device_stats *au1k_irda_stats(struct net_device *);
58static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
59static int au1k_irda_set_speed(struct net_device *dev, int speed);
60
61static void *dma_alloc(size_t, dma_addr_t *);
62static void dma_free(void *, size_t);
63
64static int qos_mtt_bits = 0x07; /* 1 ms or more */
65static struct net_device *ir_devs[NUM_IR_IFF];
66static char version[] __devinitdata =
67 "au1k_ircc:1.2 ppopov@mvista.com\n";
68
69#define RUN_AT(x) (jiffies + (x))
70
71#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
72static BCSR * const bcsr = (BCSR *)0xAE000000;
73#endif
74
75static DEFINE_SPINLOCK(ir_lock);
76
77/*
78 * IrDA peripheral bug. You have to read the register
79 * twice to get the right value.
80 */
81u32 read_ir_reg(u32 addr)
82{
83 readl(addr);
84 return readl(addr);
85}
86
87
88/*
89 * Buffer allocation/deallocation routines. The buffer descriptor returned
90 * has the virtual and dma address of a buffer suitable for
91 * both receive and transmit operations.
92 */
93static db_dest_t *GetFreeDB(struct au1k_private *aup)
94{
95 db_dest_t *pDB;
96 pDB = aup->pDBfree;
97
98 if (pDB) {
99 aup->pDBfree = pDB->pnext;
100 }
101 return pDB;
102}
103
104static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
105{
106 db_dest_t *pDBfree = aup->pDBfree;
107 if (pDBfree)
108 pDBfree->pnext = pDB;
109 aup->pDBfree = pDB;
110}
111
112
113/*
114 DMA memory allocation, derived from pci_alloc_consistent.
115 However, the Au1000 data cache is coherent (when programmed
116 so), therefore we return KSEG0 address, not KSEG1.
117*/
118static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
119{
120 void *ret;
121 int gfp = GFP_ATOMIC | GFP_DMA;
122
123 ret = (void *) __get_free_pages(gfp, get_order(size));
124
125 if (ret != NULL) {
126 memset(ret, 0, size);
127 *dma_handle = virt_to_bus(ret);
128 ret = (void *)KSEG0ADDR(ret);
129 }
130 return ret;
131}
132
133
134static void dma_free(void *vaddr, size_t size)
135{
136 vaddr = (void *)KSEG0ADDR(vaddr);
137 free_pages((unsigned long) vaddr, get_order(size));
138}
139
140
141static void
142setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
143{
144 int i;
145 for (i=0; i<NUM_IR_DESC; i++) {
146 aup->rx_ring[i] = (volatile ring_dest_t *)
147 (rx_base + sizeof(ring_dest_t)*i);
148 }
149 for (i=0; i<NUM_IR_DESC; i++) {
150 aup->tx_ring[i] = (volatile ring_dest_t *)
151 (tx_base + sizeof(ring_dest_t)*i);
152 }
153}
154
155static int au1k_irda_init(void)
156{
157 static unsigned version_printed = 0;
158 struct au1k_private *aup;
159 struct net_device *dev;
160 int err;
161
162 if (version_printed++ == 0) printk(version);
163
164 dev = alloc_irdadev(sizeof(struct au1k_private));
165 if (!dev)
166 return -ENOMEM;
167
168 dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
169 err = au1k_irda_net_init(dev);
170 if (err)
171 goto out;
172 err = register_netdev(dev);
173 if (err)
174 goto out1;
175 ir_devs[0] = dev;
176 printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
177 return 0;
178
179out1:
180 aup = netdev_priv(dev);
181 dma_free((void *)aup->db[0].vaddr,
182 MAX_BUF_SIZE * 2*NUM_IR_DESC);
183 dma_free((void *)aup->rx_ring[0],
184 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
185 kfree(aup->rx_buff.head);
186out:
187 free_netdev(dev);
188 return err;
189}
190
191static int au1k_irda_init_iobuf(iobuff_t *io, int size)
192{
193 io->head = kmalloc(size, GFP_KERNEL);
194 if (io->head != NULL) {
195 io->truesize = size;
196 io->in_frame = FALSE;
197 io->state = OUTSIDE_FRAME;
198 io->data = io->head;
199 }
200 return io->head ? 0 : -ENOMEM;
201}
202
203static int au1k_irda_net_init(struct net_device *dev)
204{
205 struct au1k_private *aup = netdev_priv(dev);
206 int i, retval = 0, err;
207 db_dest_t *pDB, *pDBfree;
208 dma_addr_t temp;
209
210 err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
211 if (err)
212 goto out1;
213
214 dev->open = au1k_irda_start;
215 dev->hard_start_xmit = au1k_irda_hard_xmit;
216 dev->stop = au1k_irda_stop;
217 dev->get_stats = au1k_irda_stats;
218 dev->do_ioctl = au1k_irda_ioctl;
219 dev->tx_timeout = au1k_tx_timeout;
220
221 irda_init_max_qos_capabilies(&aup->qos);
222
223	/* The only value we must override is the baudrate */
224 aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
225 IR_115200|IR_576000 |(IR_4000000 << 8);
226
227 aup->qos.min_turn_time.bits = qos_mtt_bits;
228 irda_qos_bits_to_value(&aup->qos);
229
230 retval = -ENOMEM;
231
232 /* Tx ring follows rx ring + 512 bytes */
233 /* we need a 1k aligned buffer */
234 aup->rx_ring[0] = (ring_dest_t *)
235 dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp);
236 if (!aup->rx_ring[0])
237 goto out2;
238
239 /* allocate the data buffers */
240 aup->db[0].vaddr =
241 (void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp);
242 if (!aup->db[0].vaddr)
243 goto out3;
244
245 setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
246
247 pDBfree = NULL;
248 pDB = aup->db;
249 for (i=0; i<(2*NUM_IR_DESC); i++) {
250 pDB->pnext = pDBfree;
251 pDBfree = pDB;
252 pDB->vaddr =
253 (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i);
254 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
255 pDB++;
256 }
257 aup->pDBfree = pDBfree;
258
259 /* attach a data buffer to each descriptor */
260 for (i=0; i<NUM_IR_DESC; i++) {
261 pDB = GetFreeDB(aup);
262 if (!pDB) goto out;
263 aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
264 aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
265 aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
266 aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
267 aup->rx_db_inuse[i] = pDB;
268 }
269 for (i=0; i<NUM_IR_DESC; i++) {
270 pDB = GetFreeDB(aup);
271 if (!pDB) goto out;
272 aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
273 aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
274 aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
275 aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
276 aup->tx_ring[i]->count_0 = 0;
277 aup->tx_ring[i]->count_1 = 0;
278 aup->tx_ring[i]->flags = 0;
279 aup->tx_db_inuse[i] = pDB;
280 }
281
282#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
283 /* power on */
284 bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
285 bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL;
286 au_sync();
287#endif
288
289 return 0;
290
291out3:
292 dma_free((void *)aup->rx_ring[0],
293 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
294out2:
295 kfree(aup->rx_buff.head);
296out1:
297 printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval);
298 return retval;
299}
300
301
302static int au1k_init(struct net_device *dev)
303{
304 struct au1k_private *aup = netdev_priv(dev);
305 int i;
306 u32 control;
307 u32 ring_address;
308
309 /* bring the device out of reset */
310 control = 0xe; /* coherent, clock enable, one half system clock */
311
312#ifndef CONFIG_CPU_LITTLE_ENDIAN
313 control |= 1;
314#endif
315 aup->tx_head = 0;
316 aup->tx_tail = 0;
317 aup->rx_head = 0;
318
319 for (i=0; i<NUM_IR_DESC; i++) {
320 aup->rx_ring[i]->flags = AU_OWN;
321 }
322
323 writel(control, IR_INTERFACE_CONFIG);
324 au_sync_delay(10);
325
326 writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */
327 au_sync_delay(1);
328
329 writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN);
330
331 ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
332 writel(ring_address >> 26, IR_RING_BASE_ADDR_H);
333 writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L);
334
335 writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE);
336
337 writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */
338 writel(0, IR_RING_ADDR_CMPR);
339
340 au1k_irda_set_speed(dev, 9600);
341 return 0;
342}
343
344static int au1k_irda_start(struct net_device *dev)
345{
346 int retval;
347 char hwname[32];
348 struct au1k_private *aup = netdev_priv(dev);
349
350 if ((retval = au1k_init(dev))) {
351 printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
352 return retval;
353 }
354
355 if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt,
356 0, dev->name, dev))) {
357 printk(KERN_ERR "%s: unable to get IRQ %d\n",
358 dev->name, dev->irq);
359 return retval;
360 }
361 if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt,
362 0, dev->name, dev))) {
363 free_irq(AU1000_IRDA_TX_INT, dev);
364 printk(KERN_ERR "%s: unable to get IRQ %d\n",
365 dev->name, dev->irq);
366 return retval;
367 }
368
369 /* Give self a hardware name */
370 sprintf(hwname, "Au1000 SIR/FIR");
371 aup->irlap = irlap_open(dev, &aup->qos, hwname);
372 netif_start_queue(dev);
373
374 writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */
375
376 aup->timer.expires = RUN_AT((3*HZ));
377 aup->timer.data = (unsigned long)dev;
378 return 0;
379}
380
381static int au1k_irda_stop(struct net_device *dev)
382{
383 struct au1k_private *aup = netdev_priv(dev);
384
385 /* disable interrupts */
386 writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2);
387 writel(0, IR_CONFIG_1);
388 writel(0, IR_INTERFACE_CONFIG); /* disable clock */
389 au_sync();
390
391 if (aup->irlap) {
392 irlap_close(aup->irlap);
393 aup->irlap = NULL;
394 }
395
396 netif_stop_queue(dev);
397 del_timer(&aup->timer);
398
399 /* disable the interrupt */
400 free_irq(AU1000_IRDA_TX_INT, dev);
401 free_irq(AU1000_IRDA_RX_INT, dev);
402 return 0;
403}
404
405static void __exit au1k_irda_exit(void)
406{
407 struct net_device *dev = ir_devs[0];
408 struct au1k_private *aup = netdev_priv(dev);
409
410 unregister_netdev(dev);
411
412 dma_free((void *)aup->db[0].vaddr,
413 MAX_BUF_SIZE * 2*NUM_IR_DESC);
414 dma_free((void *)aup->rx_ring[0],
415 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
416 kfree(aup->rx_buff.head);
417 free_netdev(dev);
418}
419
420
421static inline void
422update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
423{
424 struct au1k_private *aup = netdev_priv(dev);
425 struct net_device_stats *ps = &aup->stats;
426
427 ps->tx_packets++;
428 ps->tx_bytes += pkt_len;
429
430 if (status & IR_TX_ERROR) {
431 ps->tx_errors++;
432 ps->tx_aborted_errors++;
433 }
434}
435
436
437static void au1k_tx_ack(struct net_device *dev)
438{
439 struct au1k_private *aup = netdev_priv(dev);
440 volatile ring_dest_t *ptxd;
441
442 ptxd = aup->tx_ring[aup->tx_tail];
443 while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
444 update_tx_stats(dev, ptxd->flags,
445 ptxd->count_1<<8 | ptxd->count_0);
446 ptxd->count_0 = 0;
447 ptxd->count_1 = 0;
448 au_sync();
449
450 aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
451 ptxd = aup->tx_ring[aup->tx_tail];
452
453 if (aup->tx_full) {
454 aup->tx_full = 0;
455 netif_wake_queue(dev);
456 }
457 }
458
459 if (aup->tx_tail == aup->tx_head) {
460 if (aup->newspeed) {
461 au1k_irda_set_speed(dev, aup->newspeed);
462 aup->newspeed = 0;
463 }
464 else {
465 writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
466 IR_CONFIG_1);
467 au_sync();
468 writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
469 IR_CONFIG_1);
470 writel(0, IR_RING_PROMPT);
471 au_sync();
472 }
473 }
474}
475
476
477/*
478 * Au1000 transmit routine.
479 */
480static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
481{
482 struct au1k_private *aup = netdev_priv(dev);
483 int speed = irda_get_next_speed(skb);
484 volatile ring_dest_t *ptxd;
485 u32 len;
486
487 u32 flags;
488 db_dest_t *pDB;
489
490 if (speed != aup->speed && speed != -1) {
491 aup->newspeed = speed;
492 }
493
494 if ((skb->len == 0) && (aup->newspeed)) {
495 if (aup->tx_tail == aup->tx_head) {
496 au1k_irda_set_speed(dev, speed);
497 aup->newspeed = 0;
498 }
499 dev_kfree_skb(skb);
500 return 0;
501 }
502
503 ptxd = aup->tx_ring[aup->tx_head];
504 flags = ptxd->flags;
505
506 if (flags & AU_OWN) {
507 printk(KERN_DEBUG "%s: tx_full\n", dev->name);
508 netif_stop_queue(dev);
509 aup->tx_full = 1;
510 return 1;
511 }
512 else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
513 printk(KERN_DEBUG "%s: tx_full\n", dev->name);
514 netif_stop_queue(dev);
515 aup->tx_full = 1;
516 return 1;
517 }
518
519 pDB = aup->tx_db_inuse[aup->tx_head];
520
521#if 0
522 if (read_ir_reg(IR_RX_BYTE_CNT) != 0) {
523 printk("tx warning: rx byte cnt %x\n",
524 read_ir_reg(IR_RX_BYTE_CNT));
525 }
526#endif
527
528 if (aup->speed == 4000000) {
529 /* FIR */
530 memcpy((void *)pDB->vaddr, skb->data, skb->len);
531 ptxd->count_0 = skb->len & 0xff;
532 ptxd->count_1 = (skb->len >> 8) & 0xff;
533
534 }
535 else {
536 /* SIR */
537 len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
538 ptxd->count_0 = len & 0xff;
539 ptxd->count_1 = (len >> 8) & 0xff;
540 ptxd->flags |= IR_DIS_CRC;
541 au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c);
542 }
543 ptxd->flags |= AU_OWN;
544 au_sync();
545
546 writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1);
547 writel(0, IR_RING_PROMPT);
548 au_sync();
549
550 dev_kfree_skb(skb);
551 aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
552 dev->trans_start = jiffies;
553 return 0;
554}
555
556
557static inline void
558update_rx_stats(struct net_device *dev, u32 status, u32 count)
559{
560 struct au1k_private *aup = netdev_priv(dev);
561 struct net_device_stats *ps = &aup->stats;
562
563 ps->rx_packets++;
564
565 if (status & IR_RX_ERROR) {
566 ps->rx_errors++;
567 if (status & (IR_PHY_ERROR|IR_FIFO_OVER))
568 ps->rx_missed_errors++;
569 if (status & IR_MAX_LEN)
570 ps->rx_length_errors++;
571 if (status & IR_CRC_ERROR)
572 ps->rx_crc_errors++;
573 }
574 else
575 ps->rx_bytes += count;
576}
577
578/*
579 * Au1000 receive routine.
580 */
581static int au1k_irda_rx(struct net_device *dev)
582{
583 struct au1k_private *aup = netdev_priv(dev);
584 struct sk_buff *skb;
585 volatile ring_dest_t *prxd;
586 u32 flags, count;
587 db_dest_t *pDB;
588
589 prxd = aup->rx_ring[aup->rx_head];
590 flags = prxd->flags;
591
592 while (!(flags & AU_OWN)) {
593 pDB = aup->rx_db_inuse[aup->rx_head];
594 count = prxd->count_1<<8 | prxd->count_0;
595 if (!(flags & IR_RX_ERROR)) {
596 /* good frame */
597 update_rx_stats(dev, flags, count);
598 skb=alloc_skb(count+1,GFP_ATOMIC);
599 if (skb == NULL) {
600 aup->stats.rx_dropped++;
601 continue;
602 }
603 skb_reserve(skb, 1);
604 if (aup->speed == 4000000)
605 skb_put(skb, count);
606 else
607 skb_put(skb, count-2);
608 memcpy(skb->data, (void *)pDB->vaddr, count-2);
609 skb->dev = dev;
610 skb->mac.raw = skb->data;
611 skb->protocol = htons(ETH_P_IRDA);
612 netif_rx(skb);
613 prxd->count_0 = 0;
614 prxd->count_1 = 0;
615 }
616 prxd->flags |= AU_OWN;
617 aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
618 writel(0, IR_RING_PROMPT);
619 au_sync();
620
621 /* next descriptor */
622 prxd = aup->rx_ring[aup->rx_head];
623 flags = prxd->flags;
624 dev->last_rx = jiffies;
625
626 }
627 return 0;
628}
629
630
631void au1k_irda_interrupt(int irq, void *dev_id, struct pt_regs *regs)
632{
633 struct net_device *dev = (struct net_device *) dev_id;
634
635 if (dev == NULL) {
636		printk(KERN_ERR "au1k_irda: isr: null dev ptr\n");
637 return;
638 }
639
640 writel(0, IR_INT_CLEAR); /* ack irda interrupts */
641
642 au1k_irda_rx(dev);
643 au1k_tx_ack(dev);
644}
645
646
647/*
648 * The Tx ring has been full for longer than the watchdog timeout
649 * value; the transmitter is probably hung.
650 */
651static void au1k_tx_timeout(struct net_device *dev)
652{
653 u32 speed;
654 struct au1k_private *aup = netdev_priv(dev);
655
656 printk(KERN_ERR "%s: tx timeout\n", dev->name);
657 speed = aup->speed;
658 aup->speed = 0;
659 au1k_irda_set_speed(dev, speed);
660 aup->tx_full = 0;
661 netif_wake_queue(dev);
662}
663
664
665/*
666 * Set the IrDA communications speed.
667 */
668static int
669au1k_irda_set_speed(struct net_device *dev, int speed)
670{
671 unsigned long flags;
672 struct au1k_private *aup = netdev_priv(dev);
673 u32 control;
674 int ret = 0, timeout = 10, i;
675 volatile ring_dest_t *ptxd;
676#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
677 unsigned long irda_resets;
678#endif
679
680 if (speed == aup->speed)
681 return ret;
682
683 spin_lock_irqsave(&ir_lock, flags);
684
685 /* disable PHY first */
686 writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);
687
688 /* disable RX/TX */
689 writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
690 IR_CONFIG_1);
691 au_sync_delay(1);
692 while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
693 mdelay(1);
694 if (!timeout--) {
695 printk(KERN_ERR "%s: rx/tx disable timeout\n",
696 dev->name);
697 break;
698 }
699 }
700
701 /* disable DMA */
702 writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
703 au_sync_delay(1);
704
705 /*
706	 * After we disable tx/rx, the index pointers
707 * go back to zero.
708 */
709 aup->tx_head = aup->tx_tail = aup->rx_head = 0;
710 for (i=0; i<NUM_IR_DESC; i++) {
711 ptxd = aup->tx_ring[i];
712 ptxd->flags = 0;
713 ptxd->count_0 = 0;
714 ptxd->count_1 = 0;
715 }
716
717 for (i=0; i<NUM_IR_DESC; i++) {
718 ptxd = aup->rx_ring[i];
719 ptxd->count_0 = 0;
720 ptxd->count_1 = 0;
721 ptxd->flags = AU_OWN;
722 }
723
724 if (speed == 4000000) {
725#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
726 bcsr->resets |= BCSR_RESETS_FIR_SEL;
727#else /* Pb1000 and Pb1100 */
728 writel(1<<13, CPLD_AUX1);
729#endif
730 }
731 else {
732#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
733 bcsr->resets &= ~BCSR_RESETS_FIR_SEL;
734#else /* Pb1000 and Pb1100 */
735 writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
736#endif
737 }
738
739 switch (speed) {
740 case 9600:
741 writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
742 writel(IR_SIR_MODE, IR_CONFIG_1);
743 break;
744 case 19200:
745 writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
746 writel(IR_SIR_MODE, IR_CONFIG_1);
747 break;
748 case 38400:
749 writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
750 writel(IR_SIR_MODE, IR_CONFIG_1);
751 break;
752 case 57600:
753 writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
754 writel(IR_SIR_MODE, IR_CONFIG_1);
755 break;
756 case 115200:
757 writel(12<<5, IR_WRITE_PHY_CONFIG);
758 writel(IR_SIR_MODE, IR_CONFIG_1);
759 break;
760 case 4000000:
761 writel(0xF, IR_WRITE_PHY_CONFIG);
762 writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
763 break;
764 default:
765 printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
766 ret = -EINVAL;
767 break;
768 }
769
770 aup->speed = speed;
771 writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
772 au_sync();
773
774 control = read_ir_reg(IR_ENABLE);
775 writel(0, IR_RING_PROMPT);
776 au_sync();
777
778 if (control & (1<<14)) {
779 printk(KERN_ERR "%s: configuration error\n", dev->name);
780 }
781 else {
782 if (control & (1<<11))
783 printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
784 if (control & (1<<12))
785 printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
786 if (control & (1<<13))
787 printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
788 if (control & (1<<10))
789 printk(KERN_DEBUG "%s TX enabled\n", dev->name);
790 if (control & (1<<9))
791 printk(KERN_DEBUG "%s RX enabled\n", dev->name);
792 }
793
794 spin_unlock_irqrestore(&ir_lock, flags);
795 return ret;
796}
797
798static int
799au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
800{
801 struct if_irda_req *rq = (struct if_irda_req *)ifreq;
802 struct au1k_private *aup = netdev_priv(dev);
803 int ret = -EOPNOTSUPP;
804
805 switch (cmd) {
806 case SIOCSBANDWIDTH:
807 if (capable(CAP_NET_ADMIN)) {
808 /*
809 * We are unable to set the speed if the
810 * device is not running.
811 */
812 if (aup->open)
813 ret = au1k_irda_set_speed(dev,
814 rq->ifr_baudrate);
815 else {
816 printk(KERN_ERR "%s ioctl: !netif_running\n",
817 dev->name);
818 ret = 0;
819 }
820 }
821 break;
822
823 case SIOCSMEDIABUSY:
824 ret = -EPERM;
825 if (capable(CAP_NET_ADMIN)) {
826 irda_device_set_media_busy(dev, TRUE);
827 ret = 0;
828 }
829 break;
830
831 case SIOCGRECEIVING:
832 rq->ifr_receiving = 0;
833 break;
834 default:
835 break;
836 }
837 return ret;
838}
839
840
841static struct net_device_stats *au1k_irda_stats(struct net_device *dev)
842{
843 struct au1k_private *aup = netdev_priv(dev);
844 return &aup->stats;
845}
846
847MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
848MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
849
850module_init(au1k_irda_init);
851module_exit(au1k_irda_exit);
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
new file mode 100644
index 000000000000..0a08c539c051
--- /dev/null
+++ b/drivers/net/irda/donauboe.c
@@ -0,0 +1,1789 @@
1/*****************************************************************
2 *
3 * Filename: donauboe.c
4 * Version: 2.17
5 * Description: Driver for the Toshiba OBOE (or type-O or 701)
6 * FIR Chipset, also supports the DONAUOBOE (type-DO
7 * or d01) FIR chipset which as far as I know is
8 * register compatible.
9 * Documentation: http://libxg.free.fr/irda/lib-irda.html
10 * Status: Experimental.
11 * Author: James McKenzie <james@fishsoup.dhs.org>
12 * Created at: Sat May 8 12:35:27 1999
13 * Modified: Paul Bristow <paul.bristow@technologist.com>
14 * Modified: Mon Nov 11 19:10:05 1999
15 * Modified: James McKenzie <james@fishsoup.dhs.org>
16 * Modified: Thu Mar 16 12:49:00 2000 (Substantial rewrite)
17 * Modified: Sat Apr 29 00:23:03 2000 (Added DONAUOBOE support)
18 * Modified: Wed May 24 23:45:02 2000 (Fixed chipio_t structure)
19 * Modified: 2.13 Christian Gennerat <christian.gennerat@polytechnique.org>
20 * Modified: 2.13 dim jan 07 21:57:39 2001 (tested with kernel 2.4 & irnet/ppp)
21 * Modified: 2.14 Christian Gennerat <christian.gennerat@polytechnique.org>
22 * Modified: 2.14 lun fev 05 17:55:59 2001 (adapted to patch-2.4.1-pre8-irda1)
23 * Modified: 2.15 Martin Lucina <mato@kotelna.sk>
24 * Modified: 2.15 Fri Jun 21 20:40:59 2002 (sync with 2.4.18, substantial fixes)
25 * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
26 * Modified: 2.16 Sat Jun 22 18:54:29 2002 (fix freeregion, default to verbose)
27 * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
28 * Modified: 2.17 jeu sep 12 08:50:20 2002 (save_flags();cli(); replaced by spinlocks)
29 * Modified: 2.18 Christian Gennerat <christian.gennerat@polytechnique.org>
30 * Modified: 2.18 ven jan 10 03:14:16 2003 Change probe default options
31 *
32 * Copyright (c) 1999 James McKenzie, All Rights Reserved.
33 *
34 * This program is free software; you can redistribute it and/or
35 * modify it under the terms of the GNU General Public License as
36 * published by the Free Software Foundation; either version 2 of
37 * the License, or (at your option) any later version.
38 *
39 * Neither James McKenzie nor Cambridge University admit liability nor
40 * provide warranty for any of this software. This material is
41 * provided "AS-IS" and at no charge.
42 *
43 * Applicable Models : Libretto 100/110CT and many more.
44 * Toshiba refers to this chip as the type-O IR port,
45 * or the type-DO IR port.
46 *
47 ********************************************************************/
48
49/* Look at toshoboe.h (currently in include/net/irda) for details of */
50/* Where to get documentation on the chip */
51
52
53static char *rcsid =
54 "$Id: donauboe.c V2.18 ven jan 10 03:14:16 2003$";
55
56/* See below for a description of the logic in this driver */
57
58/* User serviceable parts */
59/* USE_PROBE Create the code which probes the chip and does a few tests */
60/* do_probe module parameter Enable this code */
61/* Probe code is very useful for understanding how the hardware works */
62/* Use it with various combinations of TT_LEN, RX_LEN */
63/* Strongly recommended, disable if the probe fails on your machine */
64/* and send me <james@fishsoup.dhs.org> the output of dmesg */
65#define USE_PROBE 1
66#undef USE_PROBE
67
68/* Trace Transmit ring, interrupts, Receive ring or not ? */
69#define PROBE_VERBOSE 1
70
71/* Debug option, examine sent and received raw data */
72/* Irdadump is better, but does not see all packets. Enable it if you want. */
73#undef DUMP_PACKETS
74
75/* MIR mode has not been tested. Some behaviour is different */
76/* Seems to work against an Ericsson R520 for me. -Martin */
77#define USE_MIR
78
79/* Schedule back to back hardware transmits wherever possible, otherwise */
80/* we need an interrupt for every frame, unset if oboe works for a bit and */
81/* then hangs */
82#define OPTIMIZE_TX
83
84/* Set the number of slots in the rings */
85/* If you get rx/tx fifo overflows at high bitrates, you can try increasing */
86/* these */
87
88#define RING_SIZE (OBOE_RING_SIZE_RX8 | OBOE_RING_SIZE_TX8)
89#define TX_SLOTS 8
90#define RX_SLOTS 8
91
92
93/* Less user serviceable parts below here */
94
95/* Test, Transmit and receive buffer sizes, adjust at your peril */
96/* remarks: nfs usually needs 1k blocks */
97/* remarks: in SIR mode, CRC is received, -> RX_LEN=TX_LEN+2 */
98/* remarks: test accepts large blocks. Standard is 0x80 */
99/* When TT_LEN > RX_LEN (SIR mode) data is stored in successive slots. */
100/* When 3 or more slots are needed for each test packet, */
101/* data received in the first slots is overwritten, even */
102/* if OBOE_CTL_RX_HW_OWNS is not set, without any error! */
103#define TT_LEN 0x80
104#define TX_LEN 0xc00
105#define RX_LEN 0xc04
106/* Real transmitted length (SIR mode) is about 14+(2%*TX_LEN) longer */
107/* than the user-defined length (see async_wrap_skb) and is less than 4K */
108/* Real received length (max RX_LEN) differs from the user-defined */
109/* length only by the CRC (2 or 4 bytes) */
110#define BUF_SAFETY 0x7a
111#define RX_BUF_SZ (RX_LEN)
112#define TX_BUF_SZ (TX_LEN+BUF_SAFETY)
113
114
115/* Logic of the netdev part of this driver */
116
117/* The RX ring is filled with buffers, when a packet arrives */
118/* it is DMA'd into the buffer which is marked used and RxDone called */
119/* RxDone forms an skb (and checks the CRC if in SIR mode) and ships */
120/* the packet off upstairs */
121
122/* The transmitter on the oboe chip can work in one of two modes */
123/* for each ring->tx[] the transmitter can either */
124/* a) transmit the packet, leave the transmitter enabled and proceed to */
125/* the next ring */
126/* OR */
127/* b) transmit the packet, switch off the transmitter and issue TxDone */
128
129/* All packets are entered into the ring in mode b), if the ring was */
130/* empty the transmitter is started. */
131
132/* If OPTIMIZE_TX is defined then in TxDone if the ring contains */
133/* more than one packet, all but the last are set to mode a) [HOWEVER */
134/* the hardware may not notice this, this is why we start in mode b) ] */
135/* then restart the transmitter */
136
137/* If OPTIMIZE_TX is not defined then we just restart the transmitter */
138/* if the ring isn't empty */
139
140/* Speed changes are delayed until the TxRing is empty */
141/* mtt is handled by generating packets with bad CRCs, before the data */
142
143/* TODO: */
144/* check the mtt works ok */
145/* finish the watchdog */
146
147/* No user serviceable parts below here */
148
149#include <linux/module.h>
150
151#include <linux/kernel.h>
152#include <linux/types.h>
153#include <linux/skbuff.h>
154#include <linux/netdevice.h>
155#include <linux/ioport.h>
156#include <linux/delay.h>
157#include <linux/slab.h>
158#include <linux/init.h>
159#include <linux/pci.h>
160#include <linux/rtnetlink.h>
161
162#include <asm/system.h>
163#include <asm/io.h>
164
165#include <net/irda/wrapper.h>
166#include <net/irda/irda.h>
167//#include <net/irda/irmod.h>
168//#include <net/irda/irlap_frame.h>
169#include <net/irda/irda_device.h>
170#include <net/irda/crc.h>
171
172#include "donauboe.h"
173
174#define INB(port) inb_p(port)
175#define OUTB(val,port) outb_p(val,port)
176#define OUTBP(val,port) outb_p(val,port)
177
178#define PROMPT OUTB(OBOE_PROMPT_BIT,OBOE_PROMPT);
179
180#if PROBE_VERBOSE
181#define PROBE_DEBUG(args...) (printk (args))
182#else
183#define PROBE_DEBUG(args...) ;
184#endif
185
186/* Set the DMA to be byte at a time */
187#define CONFIG0H_DMA_OFF OBOE_CONFIG0H_RCVANY
188#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
189#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
190
191static struct pci_device_id toshoboe_pci_tbl[] = {
192 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
193 { PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
194 { } /* Terminating entry */
195};
196MODULE_DEVICE_TABLE(pci, toshoboe_pci_tbl);
197
198#define DRIVER_NAME "toshoboe"
199static char *driver_name = DRIVER_NAME;
200
201static int max_baud = 4000000;
202#ifdef USE_PROBE
203static int do_probe = 0;
204#endif
205
206
207/**********************************************************************/
208static int
209toshoboe_checkfcs (unsigned char *buf, int len)
210{
211 int i;
212 union
213 {
214 __u16 value;
215 __u8 bytes[2];
216 }
217 fcs;
218
219 fcs.value = INIT_FCS;
220
221 for (i = 0; i < len; ++i)
222 fcs.value = irda_fcs (fcs.value, *(buf++));
223
224 return (fcs.value == GOOD_FCS);
225}
226
227/***********************************************************************/
228/* Generic chip handling code */
229#ifdef DUMP_PACKETS
230static unsigned char dump[50];
231static void
232_dumpbufs (unsigned char *data, int len, char tete)
233{
234int i,j;
235char head=tete;
236for (i=0;i<len;i+=16) {
237 for (j=0;j<16 && i+j<len;j++) { sprintf(&dump[3*j],"%02x.",data[i+j]); }
238 dump [3*j]=0;
239 IRDA_DEBUG (2, "%c%s\n",head , dump);
240 head='+';
241 }
242}
243#endif
244
245#ifdef USE_PROBE
246/* Dump the registers */
247static void
248toshoboe_dumpregs (struct toshoboe_cb *self)
249{
250 __u32 ringbase;
251
252 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
253
254 ringbase = INB (OBOE_RING_BASE0) << 10;
255 ringbase |= INB (OBOE_RING_BASE1) << 18;
256 ringbase |= INB (OBOE_RING_BASE2) << 26;
257
258 printk (KERN_ERR DRIVER_NAME ": Register dump:\n");
259 printk (KERN_ERR "Interrupts: Tx:%d Rx:%d TxUnder:%d RxOver:%d Sip:%d\n",
260 self->int_tx, self->int_rx, self->int_txunder, self->int_rxover,
261 self->int_sip);
262 printk (KERN_ERR "RX %02x TX %02x RingBase %08x\n",
263 INB (OBOE_RXSLOT), INB (OBOE_TXSLOT), ringbase);
264 printk (KERN_ERR "RING_SIZE %02x IER %02x ISR %02x\n",
265 INB (OBOE_RING_SIZE), INB (OBOE_IER), INB (OBOE_ISR));
266 printk (KERN_ERR "CONFIG1 %02x STATUS %02x\n",
267 INB (OBOE_CONFIG1), INB (OBOE_STATUS));
268 printk (KERN_ERR "CONFIG0 %02x%02x ENABLE %02x%02x\n",
269 INB (OBOE_CONFIG0H), INB (OBOE_CONFIG0L),
270 INB (OBOE_ENABLEH), INB (OBOE_ENABLEL));
271 printk (KERN_ERR "NEW_PCONFIG %02x%02x CURR_PCONFIG %02x%02x\n",
272 INB (OBOE_NEW_PCONFIGH), INB (OBOE_NEW_PCONFIGL),
273 INB (OBOE_CURR_PCONFIGH), INB (OBOE_CURR_PCONFIGL));
274 printk (KERN_ERR "MAXLEN %02x%02x RXCOUNT %02x%02x\n",
275 INB (OBOE_MAXLENH), INB (OBOE_MAXLENL),
276 INB (OBOE_RXCOUNTL), INB (OBOE_RXCOUNTH));
277
278 if (self->ring)
279 {
280 int i;
281 ringbase = virt_to_bus (self->ring);
282 printk (KERN_ERR "Ring at %08x:\n", ringbase);
283 printk (KERN_ERR "RX:");
284 for (i = 0; i < RX_SLOTS; ++i)
285 printk (" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
286 printk ("\n");
287 printk (KERN_ERR "TX:");
288 for (i = 0; i < RX_SLOTS; ++i)
289 printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
290 printk ("\n");
291 }
292}
293#endif
294
295/*Don't let the chip look at memory */
296static void
297toshoboe_disablebm (struct toshoboe_cb *self)
298{
299 __u8 command;
300 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
301
302 pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
303 command &= ~PCI_COMMAND_MASTER;
304 pci_write_config_byte (self->pdev, PCI_COMMAND, command);
305
306}
307
308/* Shutdown the chip and point the taskfile reg somewhere else */
309static void
310toshoboe_stopchip (struct toshoboe_cb *self)
311{
312 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
313
314 /*Disable interrupts */
315 OUTB (0x0, OBOE_IER);
316 /*Disable DMA, Disable Rx, Disable Tx */
317 OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
318 /*Disable SIR MIR FIR, Tx and Rx */
319 OUTB (0x00, OBOE_ENABLEH);
320 /*Point the ring somewhere safe */
321 OUTB (0x3f, OBOE_RING_BASE2);
322 OUTB (0xff, OBOE_RING_BASE1);
323 OUTB (0xff, OBOE_RING_BASE0);
324
325 OUTB (RX_LEN >> 8, OBOE_MAXLENH);
326 OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
327
328  /*Acknowledge any pending interrupts */
329 OUTB (0xff, OBOE_ISR);
330
331 /*Why */
332 OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
333
334 /*switch it off */
335 OUTB (OBOE_CONFIG1_OFF, OBOE_CONFIG1);
336
337 toshoboe_disablebm (self);
338}
339
340/* Transmitter initialization */
341static void
342toshoboe_start_DMA (struct toshoboe_cb *self, int opts)
343{
344 OUTB (0x0, OBOE_ENABLEH);
345 OUTB (CONFIG0H_DMA_ON | opts, OBOE_CONFIG0H);
346 OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
347 PROMPT;
348}
349
350/*Set the baud rate */
351static void
352toshoboe_setbaud (struct toshoboe_cb *self)
353{
354 __u16 pconfig = 0;
355 __u8 config0l = 0;
356
357 IRDA_DEBUG (2, "%s(%d/%d)\n", __FUNCTION__, self->speed, self->io.speed);
358
359 switch (self->speed)
360 {
361 case 2400:
362 case 4800:
363 case 9600:
364 case 19200:
365 case 38400:
366 case 57600:
367 case 115200:
368#ifdef USE_MIR
369 case 1152000:
370#endif
371 case 4000000:
372 break;
373 default:
374
375 printk (KERN_ERR DRIVER_NAME ": switch to unsupported baudrate %d\n",
376 self->speed);
377 return;
378 }
379
380 switch (self->speed)
381 {
382 /* For SIR the preamble is done by adding XBOFs */
383 /* to the packet */
384 /* set to filtered SIR mode, filter looks for BOF and EOF */
385 case 2400:
386 pconfig |= 47 << OBOE_PCONFIG_BAUDSHIFT;
387 pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
388 break;
389 case 4800:
390 pconfig |= 23 << OBOE_PCONFIG_BAUDSHIFT;
391 pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
392 break;
393 case 9600:
394 pconfig |= 11 << OBOE_PCONFIG_BAUDSHIFT;
395 pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
396 break;
397 case 19200:
398 pconfig |= 5 << OBOE_PCONFIG_BAUDSHIFT;
399 pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
400 break;
401 case 38400:
402 pconfig |= 2 << OBOE_PCONFIG_BAUDSHIFT;
403 pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
404 break;
405 case 57600:
406 pconfig |= 1 << OBOE_PCONFIG_BAUDSHIFT;
407 pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
408 break;
409 case 115200:
410 pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
411 pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
412 break;
413 default:
414 /*Set to packet based reception */
415 OUTB (RX_LEN >> 8, OBOE_MAXLENH);
416 OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
417 break;
418 }
419
420 switch (self->speed)
421 {
422 case 2400:
423 case 4800:
424 case 9600:
425 case 19200:
426 case 38400:
427 case 57600:
428 case 115200:
429 config0l = OBOE_CONFIG0L_ENSIR;
430 if (self->async)
431 {
432 /*Set to character based reception */
433 /*System will lock if MAXLEN=0 */
434 /*so have to be careful */
435 OUTB (0x01, OBOE_MAXLENH);
436 OUTB (0x01, OBOE_MAXLENL);
437 OUTB (0x00, OBOE_MAXLENH);
438 }
439 else
440 {
441 /*Set to packet based reception */
442 config0l |= OBOE_CONFIG0L_ENSIRF;
443 OUTB (RX_LEN >> 8, OBOE_MAXLENH);
444 OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
445 }
446 break;
447
448#ifdef USE_MIR
449 /* MIR mode */
450 /* Set for 16 bit CRC and enable MIR */
451 /* Preamble now handled by the chip */
452 case 1152000:
453 pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
454 pconfig |= 8 << OBOE_PCONFIG_WIDTHSHIFT;
455 pconfig |= 1 << OBOE_PCONFIG_PREAMBLESHIFT;
456 config0l = OBOE_CONFIG0L_CRC16 | OBOE_CONFIG0L_ENMIR;
457 break;
458#endif
459 /* FIR mode */
460 /* Set for 32 bit CRC and enable FIR */
461 /* Preamble handled by the chip */
462 case 4000000:
463 pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
464      /* Documentation says 14, but Toshiba uses 15 in their drivers */
465 pconfig |= 15 << OBOE_PCONFIG_PREAMBLESHIFT;
466 config0l = OBOE_CONFIG0L_ENFIR;
467 break;
468 }
469
470 /* Copy into new PHY config buffer */
471 OUTBP (pconfig >> 8, OBOE_NEW_PCONFIGH);
472 OUTB (pconfig & 0xff, OBOE_NEW_PCONFIGL);
473 OUTB (config0l, OBOE_CONFIG0L);
474
475 /* Now make OBOE copy from new PHY to current PHY */
476 OUTB (0x0, OBOE_ENABLEH);
477 OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
478 PROMPT;
479
480 /* speed change executed */
481 self->new_speed = 0;
482 self->io.speed = self->speed;
483}
484
485/*Let the chip look at memory */
486static void
487toshoboe_enablebm (struct toshoboe_cb *self)
488{
489 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
490 pci_set_master (self->pdev);
491}
492
493/*setup the ring */
494static void
495toshoboe_initring (struct toshoboe_cb *self)
496{
497 int i;
498
499 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
500
501 for (i = 0; i < TX_SLOTS; ++i)
502 {
503 self->ring->tx[i].len = 0;
504 self->ring->tx[i].control = 0x00;
505 self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]);
506 }
507
508 for (i = 0; i < RX_SLOTS; ++i)
509 {
510 self->ring->rx[i].len = RX_LEN;
511 self->ring->rx[i].len = 0;
512 self->ring->rx[i].address = virt_to_bus (self->rx_bufs[i]);
513 self->ring->rx[i].control = OBOE_CTL_RX_HW_OWNS;
514 }
515}
516
517static void
518toshoboe_resetptrs (struct toshoboe_cb *self)
519{
520  /* Can reset pointers by twiddling DMA */
521 OUTB (0x0, OBOE_ENABLEH);
522 OUTBP (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
523 OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
524
525 self->rxs = inb_p (OBOE_RXSLOT) & OBOE_SLOT_MASK;
526 self->txs = inb_p (OBOE_TXSLOT) & OBOE_SLOT_MASK;
527}
528
529/* Called in locked state */
530static void
531toshoboe_initptrs (struct toshoboe_cb *self)
532{
533
534 /* spin_lock_irqsave(self->spinlock, flags); */
535 /* save_flags (flags); */
536
537  /* Can reset pointers by twiddling DMA */
538 toshoboe_resetptrs (self);
539
540 OUTB (0x0, OBOE_ENABLEH);
541 OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
542 OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
543
544 self->txpending = 0;
545
546 /* spin_unlock_irqrestore(self->spinlock, flags); */
547 /* restore_flags (flags); */
548}
549
550/* Wake the chip up and get it looking at the rings */
551/* Called in locked state */
552static void
553toshoboe_startchip (struct toshoboe_cb *self)
554{
555 __u32 physaddr;
556
557 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
558
559 toshoboe_initring (self);
560 toshoboe_enablebm (self);
561 OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1);
562 OUTBP (OBOE_CONFIG1_ON, OBOE_CONFIG1);
563
564 /* Stop the clocks */
565 OUTB (0, OBOE_ENABLEH);
566
567 /*Set size of rings */
568 OUTB (RING_SIZE, OBOE_RING_SIZE);
569
570  /*Acknowledge any pending interrupts */
571 OUTB (0xff, OBOE_ISR);
572
573 /*Enable ints */
574 OUTB (OBOE_INT_TXDONE | OBOE_INT_RXDONE |
575 OBOE_INT_TXUNDER | OBOE_INT_RXOVER | OBOE_INT_SIP , OBOE_IER);
576
577  /*Acknowledge any pending interrupts */
578 OUTB (0xff, OBOE_ISR);
579
580 /*Set the maximum packet length to 0xfff (4095) */
581 OUTB (RX_LEN >> 8, OBOE_MAXLENH);
582 OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
583
584 /*Shutdown DMA */
585 OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
586
587 /*Find out where the rings live */
588 physaddr = virt_to_bus (self->ring);
589
590 IRDA_ASSERT ((physaddr & 0x3ff) == 0,
591               printk (KERN_ERR DRIVER_NAME ": ring not correctly aligned\n");
592 return;);
593
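  /* The 1kB-aligned ring address is split across three registers: */
  /* bits 10-17 go to RING_BASE0, 18-25 to RING_BASE1 and 26-31 to RING_BASE2 */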
594 OUTB ((physaddr >> 10) & 0xff, OBOE_RING_BASE0);
595 OUTB ((physaddr >> 18) & 0xff, OBOE_RING_BASE1);
596 OUTB ((physaddr >> 26) & 0x3f, OBOE_RING_BASE2);
597
598  /*Enable DMA controller in byte mode and RX */
599 OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
600
601 /* Start up the clocks */
602 OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
603
604 /*set to sensible speed */
605 self->speed = 9600;
606 toshoboe_setbaud (self);
607 toshoboe_initptrs (self);
608}
609
610static void
611toshoboe_isntstuck (struct toshoboe_cb *self)
612{
613}
614
615static void
616toshoboe_checkstuck (struct toshoboe_cb *self)
617{
618 unsigned long flags;
619
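  /* The reset sequence below is compiled out ("if (0)"); it only documents */
  /* how a full stop/start recovery of the chip would be performed */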
620 if (0)
621 {
622 spin_lock_irqsave(&self->spinlock, flags);
623
624 /* This will reset the chip completely */
625 printk (KERN_ERR DRIVER_NAME ": Resetting chip\n");
626
627 toshoboe_stopchip (self);
628 toshoboe_startchip (self);
629 spin_unlock_irqrestore(&self->spinlock, flags);
630 }
631}
632
633/*Generate packet of about mtt us long */
634static int
635toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)
636{
637 int xbofs;
638
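  /* mtt is in microseconds, so the filler needs roughly */
  /* mtt * speed / 8000000 bytes; doing the division in two steps keeps */
  /* the intermediate product within 32 bits, and the final increment */
  /* guarantees at least one byte */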
639 xbofs = ((int) (mtt/100)) * (int) (self->speed);
640 xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/
641 xbofs++;
642
643 IRDA_DEBUG (2, DRIVER_NAME
644 ": generated mtt of %d bytes for %d us at %d baud\n"
645 , xbofs,mtt,self->speed);
646
647 if (xbofs > TX_LEN)
648 {
649 printk (KERN_ERR DRIVER_NAME ": wanted %d bytes MTT but TX_LEN is %d\n",
650 xbofs, TX_LEN);
651 xbofs = TX_LEN;
652 }
653
654  /*XBOFs will do for SIR, MIR and FIR; SIR mode doesn't generate a checksum anyway */
655 memset (buf, XBOF, xbofs);
656
657 return xbofs;
658}
659
660static int toshoboe_invalid_dev(int irq)
661{
662 printk (KERN_WARNING DRIVER_NAME ": irq %d for unknown device.\n", irq);
663 return 1;
664}
665
666#ifdef USE_PROBE
667/***********************************************************************/
668/* Probe code */
669
670static void
671toshoboe_dumptx (struct toshoboe_cb *self)
672{
673 int i;
674 PROBE_DEBUG(KERN_WARNING "TX:");
675  for (i = 0; i < TX_SLOTS; ++i)
676 PROBE_DEBUG(" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
677 PROBE_DEBUG(" [%d]\n",self->speed);
678}
679
680static void
681toshoboe_dumprx (struct toshoboe_cb *self, int score)
682{
683 int i;
684 PROBE_DEBUG(" %d\nRX:",score);
685 for (i = 0; i < RX_SLOTS; ++i)
686 PROBE_DEBUG(" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
687 PROBE_DEBUG("\n");
688}
689
690static inline int
691stuff_byte (__u8 byte, __u8 * buf)
692{
693 switch (byte)
694 {
695 case BOF: /* FALLTHROUGH */
696 case EOF: /* FALLTHROUGH */
697 case CE:
698 /* Insert transparently coded */
699 buf[0] = CE; /* Send link escape */
700 buf[1] = byte ^ IRDA_TRANS; /* Complement bit 5 */
701 return 2;
702 /* break; */
703 default:
704 /* Non-special value, no transparency required */
705 buf[0] = byte;
706 return 1;
707 /* break; */
708 }
709}
710
711static irqreturn_t
712toshoboe_probeinterrupt (int irq, void *dev_id, struct pt_regs *regs)
713{
714 struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
715 __u8 irqstat;
716
717 if (self == NULL && toshoboe_invalid_dev(irq))
718 return IRQ_NONE;
719
720 irqstat = INB (OBOE_ISR);
721
722/* was it us */
723 if (!(irqstat & OBOE_INT_MASK))
724 return IRQ_NONE;
725
726/* Ack all the interrupts */
727 OUTB (irqstat, OBOE_ISR);
728
729 if (irqstat & OBOE_INT_TXDONE)
730 {
731 int txp;
732
733 self->int_tx++;
734 PROBE_DEBUG("T");
735
736 txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
737 if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
738 {
739 self->int_tx+=100;
740 PROBE_DEBUG("S");
741 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
742 }
743 }
744
745 if (irqstat & OBOE_INT_RXDONE) {
746 self->int_rx++;
747 PROBE_DEBUG("R"); }
748 if (irqstat & OBOE_INT_TXUNDER) {
749 self->int_txunder++;
750 PROBE_DEBUG("U"); }
751 if (irqstat & OBOE_INT_RXOVER) {
752 self->int_rxover++;
753 PROBE_DEBUG("O"); }
754 if (irqstat & OBOE_INT_SIP) {
755 self->int_sip++;
756 PROBE_DEBUG("I"); }
757 return IRQ_HANDLED;
758}
759
760static int
761toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
762{
763 int i;
764 int len = 0;
765 union
766 {
767 __u16 value;
768 __u8 bytes[2];
769 }
770 fcs;
771
772 if (fir)
773 {
774 memset (buf, 0, TT_LEN);
775 return (TT_LEN);
776 }
777
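  /* For SIR, hand-build a wrapped frame: ten XBOFs, a BOF, TT_LEN */
  /* byte-stuffed data bytes, the 16-bit FCS (corrupted when badcrc is */
  /* non-zero) and a closing EOF */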
778 fcs.value = INIT_FCS;
779
780 memset (buf, XBOF, 10);
781 len += 10;
782 buf[len++] = BOF;
783
784 for (i = 0; i < TT_LEN; ++i)
785 {
786 len += stuff_byte (i, buf + len);
787 fcs.value = irda_fcs (fcs.value, i);
788 }
789
790 len += stuff_byte (fcs.bytes[0] ^ badcrc, buf + len);
791 len += stuff_byte (fcs.bytes[1] ^ badcrc, buf + len);
792 buf[len++] = EOF;
793 len++;
794 return len;
795}
796
797static int
798toshoboe_probefail (struct toshoboe_cb *self, char *msg)
799{
800  printk (KERN_ERR DRIVER_NAME ": probe(%d) failed %s\n", self->speed, msg);
801 toshoboe_dumpregs (self);
802 toshoboe_stopchip (self);
803 free_irq (self->io.irq, (void *) self);
804 return 0;
805}
806
807static int
808toshoboe_numvalidrcvs (struct toshoboe_cb *self)
809{
810 int i, ret = 0;
811 for (i = 0; i < RX_SLOTS; ++i)
812 if ((self->ring->rx[i].control & 0xe0) == 0)
813 ret++;
814
815 return ret;
816}
817
818static int
819toshoboe_numrcvs (struct toshoboe_cb *self)
820{
821 int i, ret = 0;
822 for (i = 0; i < RX_SLOTS; ++i)
823 if (!(self->ring->rx[i].control & OBOE_CTL_RX_HW_OWNS))
824 ret++;
825
826 return ret;
827}
828
829static int
830toshoboe_probe (struct toshoboe_cb *self)
831{
832 int i, j, n;
833#ifdef USE_MIR
834 int bauds[] = { 9600, 115200, 4000000, 1152000 };
835#else
836 int bauds[] = { 9600, 115200, 4000000 };
837#endif
838 unsigned long flags;
839
840 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
841
842 if (request_irq (self->io.irq, toshoboe_probeinterrupt,
843 self->io.irqflags, "toshoboe", (void *) self))
844 {
845 printk (KERN_ERR DRIVER_NAME ": probe failed to allocate irq %d\n",
846 self->io.irq);
847 return 0;
848 }
849
850 /* test 1: SIR filter and back to back */
851
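  /* For each baud rate queue four loopback test frames, then poll until */
  /* the expected number of valid receptions and interrupts has arrived */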
852 for (j = 0; j < (sizeof (bauds) / sizeof (int)); ++j)
853 {
854 int fir = (j > 1);
855 toshoboe_stopchip (self);
856
857
858 spin_lock_irqsave(&self->spinlock, flags);
859 /*Address is already setup */
860 toshoboe_startchip (self);
861 self->int_rx = self->int_tx = 0;
862 self->speed = bauds[j];
863 toshoboe_setbaud (self);
864 toshoboe_initptrs (self);
865 spin_unlock_irqrestore(&self->spinlock, flags);
866
867 self->ring->tx[self->txs].control =
868/* (FIR only) OBOE_CTL_TX_SIP needed for switching to next slot */
869/* MIR: all received data is stored in one slot */
870 (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
871 : OBOE_CTL_TX_HW_OWNS ;
872 self->ring->tx[self->txs].len =
873 toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
874 self->txs++;
875 self->txs %= TX_SLOTS;
876
877 self->ring->tx[self->txs].control =
878 (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_SIP
879 : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
880 self->ring->tx[self->txs].len =
881 toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
882 self->txs++;
883 self->txs %= TX_SLOTS;
884
885 self->ring->tx[self->txs].control =
886 (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
887 : OBOE_CTL_TX_HW_OWNS ;
888 self->ring->tx[self->txs].len =
889 toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
890 self->txs++;
891 self->txs %= TX_SLOTS;
892
893 self->ring->tx[self->txs].control =
894 (fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
895 | OBOE_CTL_TX_SIP | OBOE_CTL_TX_BAD_CRC
896 : OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
897 self->ring->tx[self->txs].len =
898 toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
899 self->txs++;
900 self->txs %= TX_SLOTS;
901
902 toshoboe_dumptx (self);
903 /* Turn on TX and RX and loopback */
904 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
905
906 i = 0;
907 n = fir ? 1 : 4;
908 while (toshoboe_numvalidrcvs (self) != n)
909 {
910 if (i > 4800)
911 return toshoboe_probefail (self, "filter test");
912 udelay ((9600*(TT_LEN+16))/self->speed);
913 i++;
914 }
915
916 n = fir ? 203 : 102;
917 while ((toshoboe_numrcvs(self) != self->int_rx) || (self->int_tx != n))
918 {
919 if (i > 4800)
920 return toshoboe_probefail (self, "interrupt test");
921 udelay ((9600*(TT_LEN+16))/self->speed);
922 i++;
923 }
924 toshoboe_dumprx (self,i);
925
926 }
927
928 /* test 2: SIR in char at a time */
929
930 toshoboe_stopchip (self);
931 self->int_rx = self->int_tx = 0;
932
933 spin_lock_irqsave(&self->spinlock, flags);
934 toshoboe_startchip (self);
935 spin_unlock_irqrestore(&self->spinlock, flags);
936
937 self->async = 1;
938 self->speed = 115200;
939 toshoboe_setbaud (self);
940 self->ring->tx[self->txs].control =
941 OBOE_CTL_TX_RTCENTX | OBOE_CTL_TX_HW_OWNS;
942 self->ring->tx[self->txs].len = 4;
943
944 ((unsigned char *) self->tx_bufs[self->txs])[0] = 'f';
945 ((unsigned char *) self->tx_bufs[self->txs])[1] = 'i';
946 ((unsigned char *) self->tx_bufs[self->txs])[2] = 's';
947 ((unsigned char *) self->tx_bufs[self->txs])[3] = 'h';
948 toshoboe_dumptx (self);
949 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
950
951 i = 0;
952 while (toshoboe_numvalidrcvs (self) != 4)
953 {
954 if (i > 100)
955 return toshoboe_probefail (self, "Async test");
956 udelay (100);
957 i++;
958 }
959
960 while ((toshoboe_numrcvs (self) != self->int_rx) || (self->int_tx != 1))
961 {
962 if (i > 100)
963 return toshoboe_probefail (self, "Async interrupt test");
964 udelay (100);
965 i++;
966 }
967 toshoboe_dumprx (self,i);
968
969 self->async = 0;
970 self->speed = 9600;
971 toshoboe_setbaud (self);
972 toshoboe_stopchip (self);
973
974 free_irq (self->io.irq, (void *) self);
975
976 printk (KERN_WARNING DRIVER_NAME ": Self test passed ok\n");
977
978 return 1;
979}
980#endif
981
982/******************************************************************/
983/* Netdev style code */
984
985/* Transmit something */
986static int
987toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
988{
989 struct toshoboe_cb *self;
990 __s32 speed;
991 int mtt, len, ctl;
992 unsigned long flags;
993 struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
994
995 self = (struct toshoboe_cb *) dev->priv;
996
997 IRDA_ASSERT (self != NULL, return 0; );
998
999 IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __FUNCTION__
1000 ,skb->len,self->txpending,INB (OBOE_ENABLEH));
1001 if (!cb->magic) {
1002 IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __FUNCTION__, cb->magic);
1003#ifdef DUMP_PACKETS
1004 _dumpbufs(skb->data,skb->len,'>');
1005#endif
1006 }
1007
1008 /* change speed pending, wait for its execution */
1009 if (self->new_speed)
1010 return -EBUSY;
1011
1012 /* device stopped (apm) wait for restart */
1013 if (self->stopped)
1014 return -EBUSY;
1015
1016 toshoboe_checkstuck (self);
1017
1018 dev->trans_start = jiffies;
1019
1020 /* Check if we need to change the speed */
1021 /* But not now. Wait after transmission if mtt not required */
1022 speed=irda_get_next_speed(skb);
1023 if ((speed != self->io.speed) && (speed != -1))
1024 {
1025 spin_lock_irqsave(&self->spinlock, flags);
1026
1027 if (self->txpending || skb->len)
1028 {
1029 self->new_speed = speed;
1030 IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
1031 __FUNCTION__, speed);
1032 /* if no data, that's all! */
1033 if (!skb->len)
1034 {
1035 spin_unlock_irqrestore(&self->spinlock, flags);
1036 dev_kfree_skb (skb);
1037 return 0;
1038 }
1039 /* True packet, go on, but */
1040 /* do not accept anything before change speed execution */
1041 netif_stop_queue(dev);
1042 /* ready to process TxDone interrupt */
1043 spin_unlock_irqrestore(&self->spinlock, flags);
1044 }
1045 else
1046 {
1047 /* idle and no data, change speed now */
1048 self->speed = speed;
1049 toshoboe_setbaud (self);
1050 spin_unlock_irqrestore(&self->spinlock, flags);
1051 dev_kfree_skb (skb);
1052 return 0;
1053 }
1054
1055 }
1056
1057 if ((mtt = irda_get_mtt(skb)))
1058 {
1059 /* This is fair since the queue should be empty anyway */
1060 spin_lock_irqsave(&self->spinlock, flags);
1061
1062 if (self->txpending)
1063 {
1064 spin_unlock_irqrestore(&self->spinlock, flags);
1065 return -EBUSY;
1066 }
1067
1068 /* If in SIR mode we need to generate a string of XBOFs */
1069 /* In MIR and FIR we need to generate a string of data */
1070 /* which we will add a wrong checksum to */
1071
1072 mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
1073 IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __FUNCTION__
1074 ,skb->len,mtt,self->txpending);
1075 if (mtt)
1076 {
1077 self->ring->tx[self->txs].len = mtt & 0xfff;
1078
1079 ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
1080 if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
1081 {
1082 ctl |= OBOE_CTL_TX_BAD_CRC | OBOE_CTL_TX_SIP ;
1083 }
1084#ifdef USE_MIR
1085 else if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_MIRON)
1086 {
1087 ctl |= OBOE_CTL_TX_BAD_CRC;
1088 }
1089#endif
1090 self->ring->tx[self->txs].control = ctl;
1091
1092 OUTB (0x0, OBOE_ENABLEH);
1093          /* It is only a timer - loopback mode keeps the mtt packet off the wire! */
1094 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
1095
1096 self->txpending++;
1097
1098 self->txs++;
1099 self->txs %= TX_SLOTS;
1100
1101 }
1102 else
1103 {
1104 printk(KERN_ERR DRIVER_NAME ": problem with mtt packet - ignored\n");
1105 }
1106 spin_unlock_irqrestore(&self->spinlock, flags);
1107 }
1108
1109#ifdef DUMP_PACKETS
1110dumpbufs(skb->data,skb->len,'>');
1111#endif
1112
1113 spin_lock_irqsave(&self->spinlock, flags);
1114
1115 if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
1116 {
1117 IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __FUNCTION__
1118 ,skb->len, self->ring->tx[self->txs].control, self->txpending);
1119 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
1120 spin_unlock_irqrestore(&self->spinlock, flags);
1121 return -EBUSY;
1122 }
1123
1124 if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_SIRON)
1125 {
1126 len = async_wrap_skb (skb, self->tx_bufs[self->txs], TX_BUF_SZ);
1127 }
1128 else
1129 {
1130 len = skb->len;
1131 memcpy (self->tx_bufs[self->txs], skb->data, len);
1132 }
1133 self->ring->tx[self->txs].len = len & 0x0fff;
1134
1135  /*Sometimes the HW doesn't see us assert RTCENTX in the interrupt code */
1136  /*later; to play safe we guarantee that the last packet to be transmitted */
1137  /*has RTCENTX set */
1138
1139 ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
1140 if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
1141 {
1142 ctl |= OBOE_CTL_TX_SIP ;
1143 }
1144 self->ring->tx[self->txs].control = ctl;
1145
1146 /* If transmitter is idle start in one-shot mode */
1147
1148 if (!self->txpending)
1149 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
1150
1151 self->txpending++;
1152
1153 self->txs++;
1154 self->txs %= TX_SLOTS;
1155
1156 spin_unlock_irqrestore(&self->spinlock, flags);
1157 dev_kfree_skb (skb);
1158
1159 return 0;
1160}
1161
1162/*interrupt handler */
1163static irqreturn_t
1164toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
1165{
1166 struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
1167 __u8 irqstat;
1168 struct sk_buff *skb = NULL;
1169
1170 if (self == NULL && toshoboe_invalid_dev(irq))
1171 return IRQ_NONE;
1172
1173 irqstat = INB (OBOE_ISR);
1174
1175/* was it us */
1176 if (!(irqstat & OBOE_INT_MASK))
1177 return IRQ_NONE;
1178
1179/* Ack all the interrupts */
1180 OUTB (irqstat, OBOE_ISR);
1181
1182 toshoboe_isntstuck (self);
1183
1184/* Txdone */
1185 if (irqstat & OBOE_INT_TXDONE)
1186 {
1187 int txp, txpc;
1188 int i;
1189
1190 txp = self->txpending;
1191 self->txpending = 0;
1192
1193 for (i = 0; i < TX_SLOTS; ++i)
1194 {
1195 if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
1196 self->txpending++;
1197 }
1198 IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __FUNCTION__
1199 ,irqstat,txp,self->txpending);
1200
1201 txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
1202
1203 /* Got anything queued ? start it together */
1204 if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
1205 {
1206 txpc = txp;
1207#ifdef OPTIMIZE_TX
1208 while (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
1209 {
1210 txp = txpc;
1211 txpc++;
1212 txpc %= TX_SLOTS;
1213 self->stats.tx_packets++;
1214 if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
1215 self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
1216 }
1217 self->stats.tx_packets--;
1218#else
1219 self->stats.tx_packets++;
1220#endif
1221 toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
1222 }
1223
1224 if ((!self->txpending) && (self->new_speed))
1225 {
1226 self->speed = self->new_speed;
1227 IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
1228 __FUNCTION__, self->speed);
1229 toshoboe_setbaud (self);
1230 }
1231
1232 /* Tell network layer that we want more frames */
1233 if (!self->new_speed)
1234 netif_wake_queue(self->netdev);
1235 }
1236
1237 if (irqstat & OBOE_INT_RXDONE)
1238 {
1239 while (!(self->ring->rx[self->rxs].control & OBOE_CTL_RX_HW_OWNS))
1240 {
1241 int len = self->ring->rx[self->rxs].len;
1242 skb = NULL;
1243 IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __FUNCTION__
1244 ,len,self->ring->rx[self->rxs].control);
1245
1246#ifdef DUMP_PACKETS
1247dumpbufs(self->rx_bufs[self->rxs],len,'<');
1248#endif
1249
1250 if (self->ring->rx[self->rxs].control == 0)
1251 {
1252 __u8 enable = INB (OBOE_ENABLEH);
1253
1254 /* In SIR mode we need to check the CRC as this */
1255 /* hasn't been done by the hardware */
1256 if (enable & OBOE_ENABLEH_SIRON)
1257 {
1258 if (!toshoboe_checkfcs (self->rx_bufs[self->rxs], len))
1259 len = 0;
1260 /*Trim off the CRC */
1261 if (len > 1)
1262 len -= 2;
1263 else
1264 len = 0;
1265 IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __FUNCTION__, len,enable);
1266 }
1267
1268#ifdef USE_MIR
1269 else if (enable & OBOE_ENABLEH_MIRON)
1270 {
1271 if (len > 1)
1272 len -= 2;
1273 else
1274 len = 0;
1275 IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __FUNCTION__, len,enable);
1276 }
1277#endif
1278 else if (enable & OBOE_ENABLEH_FIRON)
1279 {
1280 if (len > 3)
1281 len -= 4; /*FIXME: check this */
1282 else
1283 len = 0;
1284 IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __FUNCTION__, len,enable);
1285 }
1286 else
1287 IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __FUNCTION__, len,enable);
1288
1289 if (len)
1290 {
1291 skb = dev_alloc_skb (len + 1);
1292 if (skb)
1293 {
1294 skb_reserve (skb, 1);
1295
1296 skb_put (skb, len);
1297 memcpy (skb->data, self->rx_bufs[self->rxs], len);
1298
1299 self->stats.rx_packets++;
1300 skb->dev = self->netdev;
1301 skb->mac.raw = skb->data;
1302 skb->protocol = htons (ETH_P_IRDA);
1303 }
1304 else
1305 {
1306 printk (KERN_INFO
1307 "%s(), memory squeeze, dropping frame.\n",
1308 __FUNCTION__);
1309 }
1310 }
1311 }
1312 else
1313 {
1314 /* TODO: =========================================== */
1315 /* if OBOE_CTL_RX_LENGTH, our buffers are too small */
1316 /* (MIR or FIR) data is lost. */
1317              /* (SIR) data is split across several slots. */
1318              /* We would have to join all the received buffers */
1319              /* into one large buffer before checking the CRC. */
1320 IRDA_DEBUG (0, "%s.err:%x(%x)\n", __FUNCTION__
1321 ,len,self->ring->rx[self->rxs].control);
1322 }
1323
1324 self->ring->rx[self->rxs].len = 0x0;
1325 self->ring->rx[self->rxs].control = OBOE_CTL_RX_HW_OWNS;
1326
1327 self->rxs++;
1328 self->rxs %= RX_SLOTS;
1329
1330 if (skb)
1331 netif_rx (skb);
1332
1333 }
1334 }
1335
1336 if (irqstat & OBOE_INT_TXUNDER)
1337 {
1338 printk (KERN_WARNING DRIVER_NAME ": tx fifo underflow\n");
1339 }
1340 if (irqstat & OBOE_INT_RXOVER)
1341 {
1342 printk (KERN_WARNING DRIVER_NAME ": rx fifo overflow\n");
1343 }
1344/* This must be useful for something... */
1345 if (irqstat & OBOE_INT_SIP)
1346 {
1347 self->int_sip++;
1348 IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __FUNCTION__
1349 ,self->int_sip,irqstat,self->txpending);
1350 }
1351 return IRQ_HANDLED;
1352}
1353
1354
1355static int
1356toshoboe_net_open (struct net_device *dev)
1357{
1358 struct toshoboe_cb *self;
1359 unsigned long flags;
1360
1361 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
1362
1363 IRDA_ASSERT (dev != NULL, return -1; );
1364 self = (struct toshoboe_cb *) dev->priv;
1365
1366 IRDA_ASSERT (self != NULL, return 0; );
1367
1368 if (self->async)
1369 return -EBUSY;
1370
1371 if (self->stopped)
1372 return 0;
1373
1374 if (request_irq (self->io.irq, toshoboe_interrupt,
1375 SA_SHIRQ | SA_INTERRUPT, dev->name, (void *) self))
1376 {
1377 return -EAGAIN;
1378 }
1379
1380 spin_lock_irqsave(&self->spinlock, flags);
1381 toshoboe_startchip (self);
1382 spin_unlock_irqrestore(&self->spinlock, flags);
1383
1384 /* Ready to play! */
1385 netif_start_queue(dev);
1386
1387 /*
1388 * Open new IrLAP layer instance, now that everything should be
1389 * initialized properly
1390 */
1391 self->irlap = irlap_open (dev, &self->qos, driver_name);
1392
1393 self->irdad = 1;
1394
1395 return 0;
1396}
1397
1398static int
1399toshoboe_net_close (struct net_device *dev)
1400{
1401 struct toshoboe_cb *self;
1402
1403 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
1404
1405 IRDA_ASSERT (dev != NULL, return -1; );
1406 self = (struct toshoboe_cb *) dev->priv;
1407
1408 /* Stop device */
1409 netif_stop_queue(dev);
1410
1411 /* Stop and remove instance of IrLAP */
1412 if (self->irlap)
1413 irlap_close (self->irlap);
1414 self->irlap = NULL;
1415
1416 self->irdad = 0;
1417
1418 free_irq (self->io.irq, (void *) self);
1419
1420 if (!self->stopped)
1421 {
1422 toshoboe_stopchip (self);
1423 }
1424
1425 return 0;
1426}
1427
1428/*
1429 * Function toshoboe_net_ioctl (dev, rq, cmd)
1430 *
1431 * Process IOCTL commands for this device
1432 *
1433 */
1434static int
1435toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1436{
1437 struct if_irda_req *irq = (struct if_irda_req *) rq;
1438 struct toshoboe_cb *self;
1439 unsigned long flags;
1440 int ret = 0;
1441
1442 IRDA_ASSERT (dev != NULL, return -1; );
1443
1444 self = dev->priv;
1445
1446 IRDA_ASSERT (self != NULL, return -1; );
1447
1448 IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
1449
1450 /* Disable interrupts & save flags */
1451 spin_lock_irqsave(&self->spinlock, flags);
1452
1453 switch (cmd)
1454 {
1455 case SIOCSBANDWIDTH: /* Set bandwidth */
1456 /* This function will also be used by IrLAP to change the
1457 * speed, so we still must allow for speed change within
1458 * interrupt context.
1459 */
1460 IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __FUNCTION__
1461 ,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
1462 if (!in_interrupt () && !capable (CAP_NET_ADMIN))
1463 return -EPERM;
1464
1465 /* self->speed=irq->ifr_baudrate; */
1466 /* toshoboe_setbaud(self); */
1467 /* Just change speed once - inserted by Paul Bristow */
1468 self->new_speed = irq->ifr_baudrate;
1469 break;
1470 case SIOCSMEDIABUSY: /* Set media busy */
1471 IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __FUNCTION__
1472 ,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
1473 if (!capable (CAP_NET_ADMIN))
1474 return -EPERM;
1475 irda_device_set_media_busy (self->netdev, TRUE);
1476 break;
1477 case SIOCGRECEIVING: /* Check if we are receiving right now */
1478 irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
1479 IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __FUNCTION__
1480 ,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
1481 break;
1482 default:
1483 IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
1484 ret = -EOPNOTSUPP;
1485 }
1486
1487 spin_unlock_irqrestore(&self->spinlock, flags);
1488 return ret;
1489
1490}
1491
1492MODULE_DESCRIPTION("Toshiba OBOE IrDA Device Driver");
1493MODULE_AUTHOR("James McKenzie <james@fishsoup.dhs.org>");
1494MODULE_LICENSE("GPL");
1495
1496module_param (max_baud, int, 0);
1497MODULE_PARM_DESC(max_baud, "Maximum baud rate");
1498
1499#ifdef USE_PROBE
1500module_param (do_probe, bool, 0);
1501MODULE_PARM_DESC(do_probe, "Enable/disable chip probing and self-test");
1502#endif
1503
1504static void
1505toshoboe_close (struct pci_dev *pci_dev)
1506{
1507 int i;
1508 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
1509
1510 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
1511
1512 IRDA_ASSERT (self != NULL, return; );
1513
1514 if (!self->stopped)
1515 {
1516 toshoboe_stopchip (self);
1517 }
1518
1519 release_region (self->io.fir_base, self->io.fir_ext);
1520
1521 for (i = 0; i < TX_SLOTS; ++i)
1522 {
1523 kfree (self->tx_bufs[i]);
1524 self->tx_bufs[i] = NULL;
1525 }
1526
1527 for (i = 0; i < RX_SLOTS; ++i)
1528 {
1529 kfree (self->rx_bufs[i]);
1530 self->rx_bufs[i] = NULL;
1531 }
1532
1533 unregister_netdev(self->netdev);
1534
1535 kfree (self->ringbuf);
1536 self->ringbuf = NULL;
1537 self->ring = NULL;
1538
1539 free_netdev(self->netdev);
1540}
1541
1542static int
1543toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1544{
1545 struct toshoboe_cb *self;
1546 struct net_device *dev;
1547 int i = 0;
1548 int ok = 0;
1549 int err;
1550
1551 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
1552
1553 if ((err=pci_enable_device(pci_dev)))
1554 return err;
1555
1556 dev = alloc_irdadev(sizeof (struct toshoboe_cb));
1557 if (dev == NULL)
1558 {
1559 printk (KERN_ERR DRIVER_NAME ": can't allocate memory for "
1560 "IrDA control block\n");
1561 return -ENOMEM;
1562 }
1563
1564 self = dev->priv;
1565 self->netdev = dev;
1566 self->pdev = pci_dev;
1567 self->base = pci_resource_start(pci_dev,0);
1568
1569 self->io.fir_base = self->base;
1570 self->io.fir_ext = OBOE_IO_EXTENT;
1571 self->io.irq = pci_dev->irq;
1572 self->io.irqflags = SA_SHIRQ | SA_INTERRUPT;
1573
1574 self->speed = self->io.speed = 9600;
1575 self->async = 0;
1576
1577 /* Lock the port that we need */
1578 if (NULL==request_region (self->io.fir_base, self->io.fir_ext, driver_name))
1579 {
1580 printk (KERN_ERR DRIVER_NAME ": can't get iobase of 0x%03x\n"
1581 ,self->io.fir_base);
1582 err = -EBUSY;
1583 goto freeself;
1584 }
1585
1586 spin_lock_init(&self->spinlock);
1587
1588 irda_init_max_qos_capabilies (&self->qos);
1589 self->qos.baud_rate.bits = 0;
1590
1591 if (max_baud >= 2400)
1592 self->qos.baud_rate.bits |= IR_2400;
1593 /*if (max_baud>=4800) idev->qos.baud_rate.bits|=IR_4800; */
1594 if (max_baud >= 9600)
1595 self->qos.baud_rate.bits |= IR_9600;
1596 if (max_baud >= 19200)
1597 self->qos.baud_rate.bits |= IR_19200;
1598 if (max_baud >= 115200)
1599 self->qos.baud_rate.bits |= IR_115200;
1600#ifdef USE_MIR
1601 if (max_baud >= 1152000)
1602 {
1603 self->qos.baud_rate.bits |= IR_1152000;
1604 }
1605#endif
1606 if (max_baud >= 4000000)
1607 {
1608 self->qos.baud_rate.bits |= (IR_4000000 << 8);
1609 }
1610
1611 /*FIXME: work this out... */
1612 self->qos.min_turn_time.bits = 0xff;
1613
1614 irda_qos_bits_to_value (&self->qos);
1615
1616 /* Allocate twice the size to guarantee alignment */
1617 self->ringbuf = (void *) kmalloc (OBOE_RING_LEN << 1, GFP_KERNEL);
1618 if (!self->ringbuf)
1619 {
1620 printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n");
1621 err = -ENOMEM;
1622 goto freeregion;
1623 }
1624
1625#if (BITS_PER_LONG == 64)
1626#error broken on 64-bit: casts pointer to 32-bit, and then back to pointer.
1627#endif
1628
1629 /*We need to align the taskfile on a taskfile size boundary */
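  /* Rounding the kmalloc'ed pointer up to the next OBOE_RING_LEN boundary */
  /* always stays within the double-sized allocation made above */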
1630 {
1631 unsigned long addr;
1632
1633 addr = (__u32) self->ringbuf;
1634 addr &= ~(OBOE_RING_LEN - 1);
1635 addr += OBOE_RING_LEN;
1636 self->ring = (struct OboeRing *) addr;
1637 }
1638
1639 memset (self->ring, 0, OBOE_RING_LEN);
1640 self->io.mem_base = (__u32) self->ring;
1641
1642 ok = 1;
1643 for (i = 0; i < TX_SLOTS; ++i)
1644 {
1645 self->tx_bufs[i] = kmalloc (TX_BUF_SZ, GFP_KERNEL);
1646 if (!self->tx_bufs[i])
1647 ok = 0;
1648 }
1649
1650 for (i = 0; i < RX_SLOTS; ++i)
1651 {
1652 self->rx_bufs[i] = kmalloc (RX_BUF_SZ, GFP_KERNEL);
1653 if (!self->rx_bufs[i])
1654 ok = 0;
1655 }
1656
1657 if (!ok)
1658 {
1659 printk (KERN_ERR DRIVER_NAME ": can't allocate rx/tx buffers\n");
1660 err = -ENOMEM;
1661 goto freebufs;
1662 }
1663
1664
1665#ifdef USE_PROBE
1666 if (do_probe)
1667 if (!toshoboe_probe (self))
1668 {
1669 err = -ENODEV;
1670 goto freebufs;
1671 }
1672#endif
1673
1674 SET_MODULE_OWNER(dev);
1675 SET_NETDEV_DEV(dev, &pci_dev->dev);
1676 dev->hard_start_xmit = toshoboe_hard_xmit;
1677 dev->open = toshoboe_net_open;
1678 dev->stop = toshoboe_net_close;
1679 dev->do_ioctl = toshoboe_net_ioctl;
1680
1681 err = register_netdev(dev);
1682 if (err)
1683 {
1684 printk (KERN_ERR DRIVER_NAME ": register_netdev() failed\n");
1685 err = -ENOMEM;
1686 goto freebufs;
1687 }
1688 printk (KERN_INFO "IrDA: Registered device %s\n", dev->name);
1689
1690 pci_set_drvdata(pci_dev,self);
1691
1692 printk (KERN_INFO DRIVER_NAME ": Using multiple tasks, version %s\n", rcsid);
1693
1694 return 0;
1695
1696freebufs:
1697 for (i = 0; i < TX_SLOTS; ++i)
1698 if (self->tx_bufs[i])
1699 kfree (self->tx_bufs[i]);
1700 for (i = 0; i < RX_SLOTS; ++i)
1701 if (self->rx_bufs[i])
1702 kfree (self->rx_bufs[i]);
1703 kfree(self->ringbuf);
1704
1705freeregion:
1706 release_region (self->io.fir_base, self->io.fir_ext);
1707
1708freeself:
1709 free_netdev(dev);
1710
1711 return err;
1712}
1713
1714static int
1715toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
1716{
1717 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
1718 unsigned long flags;
1719 int i = 10;
1720
1721 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
1722
1723 if (!self || self->stopped)
1724 return 0;
1725
1726 if ((!self->irdad) && (!self->async))
1727 return 0;
1728
1729/* Flush all packets */
1730 while ((i--) && (self->txpending))
1731 udelay (10000);
1732
1733 spin_lock_irqsave(&self->spinlock, flags);
1734
1735 toshoboe_stopchip (self);
1736 self->stopped = 1;
1737 self->txpending = 0;
1738
1739 spin_unlock_irqrestore(&self->spinlock, flags);
1740 return 0;
1741}
1742
1743static int
1744toshoboe_wakeup (struct pci_dev *pci_dev)
1745{
1746 struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
1747 unsigned long flags;
1748
1749 IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
1750
1751 if (!self || !self->stopped)
1752 return 0;
1753
1754 if ((!self->irdad) && (!self->async))
1755 return 0;
1756
1757 spin_lock_irqsave(&self->spinlock, flags);
1758
1759 toshoboe_startchip (self);
1760 self->stopped = 0;
1761
1762 netif_wake_queue(self->netdev);
1763 spin_unlock_irqrestore(&self->spinlock, flags);
1764 return 0;
1765}
1766
1767static struct pci_driver donauboe_pci_driver = {
1768 .name = "donauboe",
1769 .id_table = toshoboe_pci_tbl,
1770 .probe = toshoboe_open,
1771 .remove = toshoboe_close,
1772 .suspend = toshoboe_gotosleep,
1773 .resume = toshoboe_wakeup
1774};
1775
1776static int __init
1777donauboe_init (void)
1778{
1779 return pci_module_init(&donauboe_pci_driver);
1780}
1781
1782static void __exit
1783donauboe_cleanup (void)
1784{
1785 pci_unregister_driver(&donauboe_pci_driver);
1786}
1787
1788module_init(donauboe_init);
1789module_exit(donauboe_cleanup);
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
new file mode 100644
index 000000000000..2ab173d9a0e4
--- /dev/null
+++ b/drivers/net/irda/donauboe.h
@@ -0,0 +1,363 @@
1/*********************************************************************
2 *
3 * Filename: toshoboe.h
4 * Version: 2.16
5 * Description: Driver for the Toshiba OBOE (or type-O or 701)
6 * FIR Chipset, also supports the DONAUOBOE (type-DO
7 * or d01) FIR chipset which as far as I know is
8 * register compatible.
9 * Status: Experimental.
10 * Author: James McKenzie <james@fishsoup.dhs.org>
11 * Created at: Sat May 8 12:35:27 1999
12 * Modified: 2.16 Martin Lucina <mato@kotelna.sk>
13 * Modified: 2.16 Sat Jun 22 18:54:29 2002 (sync headers)
14 * Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
15 * Modified: 2.17 jeu sep 12 08:50:20 2002 (add lock to be used by spinlocks)
16 *
17 * Copyright (c) 1999 James McKenzie, All Rights Reserved.
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License as
21 * published by the Free Software Foundation; either version 2 of
22 * the License, or (at your option) any later version.
23 *
24 * Neither James McKenzie nor Cambridge University admit liability nor
25 * provide warranty for any of this software. This material is
26 * provided "AS-IS" and at no charge.
27 *
28 * Applicable Models : Libretto 100/110CT and many more.
29 * Toshiba refers to this chip as the type-O IR port,
30 * or the type-DO IR port.
31 *
32 * IrDA chip set list from Toshiba Computer Engineering Corp.
33 * model method maker controler Version
34 * Portege 320CT FIR,SIR Toshiba Oboe(Triangle)
35 * Portege 3010CT FIR,SIR Toshiba Oboe(Sydney)
36 * Portege 3015CT FIR,SIR Toshiba Oboe(Sydney)
37 * Portege 3020CT FIR,SIR Toshiba Oboe(Sydney)
38 * Portege 7020CT FIR,SIR ? ?
39 *
40 * Satell. 4090XCDT FIR,SIR ? ?
41 *
42 * Libretto 100CT FIR,SIR Toshiba Oboe
43 * Libretto 1000CT FIR,SIR Toshiba Oboe
44 *
45 * TECRA750DVD FIR,SIR Toshiba Oboe(Triangle) REV ID=14h
46 * TECRA780 FIR,SIR Toshiba Oboe(Sandlot) REV ID=32h,33h
47 * TECRA750CDT FIR,SIR Toshiba Oboe(Triangle) REV ID=13h,14h
48 * TECRA8000 FIR,SIR Toshiba Oboe(ISKUR) REV ID=23h
49 *
50 ********************************************************************/
51
52/* The documentation for this chip is allegedly released */
53/* However I have not seen it, nor have I managed to contact */
54/* anyone who has. HOWEVER the chip bears a striking resemblance */
55/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */
56/* the documentation for this is freely available at */
57/* http://www.toshiba.com/taec/components/Generic/TMPR3922.shtml */
58/* The mapping between the registers in that document and the */
59/* Registers in the 701 oboe chip are as follows */
60
61
62/* 3922 reg 701 regs, by bit numbers */
63/* 7- 0 15- 8 24-16 31-25 */
64/* $28 0x0 0x1 */
65/* $2c SEE NOTE 1 */
66/* $30 0x6 0x7 */
67/* $34 0x8 0x9 SEE NOTE 2 */
68/* $38 0x10 0x11 */
69/* $3C 0xe SEE NOTE 3 */
70/* $40 0x12 0x13 */
71/* $44 0x14 0x15 */
72/* $48 0x16 0x17 */
73/* $4c 0x18 0x19 */
74/* $50 0x1a 0x1b */
75
76/* FIXME: could be 0x1b 0x1a here */
77
78/* $54 0x1d 0x1c */
79/* $5C 0xf SEE NOTE 4 */
80/* $130 SEE NOTE 5 */
81/* $134 SEE NOTE 6 */
82/* */
83/* NOTES: */
84/* 1. The pointer to the ring is packed in most unceremoniously */
85/* 701 Register Address bits (A9-A0 must be zero) */
86/* 0x4: A17 A16 A15 A14 A13 A12 A11 A10 */
87/* 0x5: A25 A24 A23 A22 A21 A20 A19 A18 */
88/* 0x2: 0 0 A31 A30 A29 A28 A27 A26 */
89/* */
90/* 2. The M$ drivers do a write 0x1 to 0x9, however the 3922 */
91/* documentation would suggest that a write of 0x1 to 0x8 */
92/* would be more appropriate. */
93/* */
94/* 3. This assignment is tenuous at best, register 0xe seems to */
95/* have bits arranged 0 0 0 R/W R/W R/W R/W R/W */
96/* if either of the lower two bits are set the chip seems to */
97/* switch off */
98/* */
99/* 4. Bits 7-4 seem to be different 4 seems just to be generic */
100/* receiver busy flag */
101/* */
102/* 5. and 6. The IER and ISR have a different bit assignment */
103/* The lower three bits of both read back as ones */
104/* ISR is register 0xc, IER is register 0xd */
105/* 7 6 5 4 3 2 1 0 */
106/* 0xc: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
107/* 0xd: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
108/* TxDone transmit done (generated only if the generate-interrupt bit */
109/* is set in the ring) */
110/* RxDone recv completed (or other recv condition if you set it */
111/* up */
112/* TxUnder underflow in Transmit FIFO */
113/* RxOver overflow in Recv FIFO */
114/* SipRcv received serial gap (or other condition you set) */
115/* Interrupts are enabled by writing a one to the IER register */
116/* Interrupts are cleared by writing a one to the ISR register */
117/* */
118/* 7. The remaining registers: 0x6 and 0x3 appear to be */
119/* reserved parts of 16 or 32 bit registers; the remainder */
120/* 0xa 0xb 0x1e 0x1f could possibly be (by their behaviour) */
121/* the Unicast Filter register at $58. */
122/* */
123/* 8. While the core obviously expects 32 bit accesses, all the */
124/* M$ drivers do 8 bit accesses; in fact the Miniport ones */
125/* write and read back the byte several times (why?) */
126
127
128#ifndef TOSHOBOE_H
129#define TOSHOBOE_H
130
131/* Registers */
132
133#define OBOE_IO_EXTENT 0x1f
134
135/*Receive and transmit slot pointers */
136#define OBOE_REG(i) (i+(self->base))
137#define OBOE_RXSLOT OBOE_REG(0x0)
138#define OBOE_TXSLOT OBOE_REG(0x1)
139#define OBOE_SLOT_MASK 0x3f
140
141#define OBOE_TXRING_OFFSET 0x200
142#define OBOE_TXRING_OFFSET_IN_SLOTS 0x40
143
144/*pointer to the ring */
145#define OBOE_RING_BASE0 OBOE_REG(0x4)
146#define OBOE_RING_BASE1 OBOE_REG(0x5)
147#define OBOE_RING_BASE2 OBOE_REG(0x2)
148#define OBOE_RING_BASE3 OBOE_REG(0x3)
149
150/*Number of slots in the ring */
151#define OBOE_RING_SIZE OBOE_REG(0x7)
152#define OBOE_RING_SIZE_RX4 0x00
153#define OBOE_RING_SIZE_RX8 0x01
154#define OBOE_RING_SIZE_RX16 0x03
155#define OBOE_RING_SIZE_RX32 0x07
156#define OBOE_RING_SIZE_RX64 0x0f
157#define OBOE_RING_SIZE_TX4 0x00
158#define OBOE_RING_SIZE_TX8 0x10
159#define OBOE_RING_SIZE_TX16 0x30
160#define OBOE_RING_SIZE_TX32 0x70
161#define OBOE_RING_SIZE_TX64 0xf0
162
163#define OBOE_RING_MAX_SIZE 64
164
165/*Causes the gubbins to re-examine the ring */
166#define OBOE_PROMPT OBOE_REG(0x9)
167#define OBOE_PROMPT_BIT 0x1
168
169/* Interrupt Status Register */
170#define OBOE_ISR OBOE_REG(0xc)
171/* Interrupt Enable Register */
172#define OBOE_IER OBOE_REG(0xd)
173/* Interrupt bits for IER and ISR */
174#define OBOE_INT_TXDONE 0x80
175#define OBOE_INT_RXDONE 0x40
176#define OBOE_INT_TXUNDER 0x20
177#define OBOE_INT_RXOVER 0x10
178#define OBOE_INT_SIP 0x08
179#define OBOE_INT_MASK 0xf8
180
181/*Reset Register */
182#define OBOE_CONFIG1 OBOE_REG(0xe)
183#define OBOE_CONFIG1_RST 0x01
184#define OBOE_CONFIG1_DISABLE 0x02
185#define OBOE_CONFIG1_4 0x08
186#define OBOE_CONFIG1_8 0x08
187
188#define OBOE_CONFIG1_ON 0x8
189#define OBOE_CONFIG1_RESET 0xf
190#define OBOE_CONFIG1_OFF 0xe
191
192#define OBOE_STATUS OBOE_REG(0xf)
193#define OBOE_STATUS_RXBUSY 0x10
194#define OBOE_STATUS_FIRRX 0x04
195#define OBOE_STATUS_MIRRX 0x02
196#define OBOE_STATUS_SIRRX 0x01
197
198
199/*Speed control registers */
200#define OBOE_CONFIG0L OBOE_REG(0x10)
201#define OBOE_CONFIG0H OBOE_REG(0x11)
202
203#define OBOE_CONFIG0H_TXONLOOP 0x80 /*Transmit when looping (dangerous) */
204#define OBOE_CONFIG0H_LOOP 0x40 /*Loopback Tx->Rx */
205#define OBOE_CONFIG0H_ENTX 0x10 /*Enable Tx */
206#define OBOE_CONFIG0H_ENRX 0x08 /*Enable Rx */
207#define OBOE_CONFIG0H_ENDMAC 0x04 /*Enable/reset* the DMA controller */
208#define OBOE_CONFIG0H_RCVANY 0x02 /*DMA mode 1=bytes, 0=dwords */
209
210#define OBOE_CONFIG0L_CRC16 0x80 /*CRC 1=16 bit 0=32 bit */
211#define OBOE_CONFIG0L_ENFIR 0x40 /*Enable FIR */
212#define OBOE_CONFIG0L_ENMIR 0x20 /*Enable MIR */
213#define OBOE_CONFIG0L_ENSIR 0x10 /*Enable SIR */
214#define OBOE_CONFIG0L_ENSIRF 0x08 /*Enable SIR framer */
215#define OBOE_CONFIG0L_SIRTEST 0x04 /*Enable SIR framer in MIR and FIR */
216#define OBOE_CONFIG0L_INVERTTX 0x02 /*Invert Tx Line */
217#define OBOE_CONFIG0L_INVERTRX 0x01 /*Invert Rx Line */
218
219#define OBOE_BOF OBOE_REG(0x12)
220#define OBOE_EOF OBOE_REG(0x13)
221
222#define OBOE_ENABLEL OBOE_REG(0x14)
223#define OBOE_ENABLEH OBOE_REG(0x15)
224
225#define OBOE_ENABLEH_PHYANDCLOCK 0x80 /*Toggle low to copy config in */
226#define OBOE_ENABLEH_CONFIGERR 0x40
227#define OBOE_ENABLEH_FIRON 0x20
228#define OBOE_ENABLEH_MIRON 0x10
229#define OBOE_ENABLEH_SIRON 0x08
230#define OBOE_ENABLEH_ENTX 0x04
231#define OBOE_ENABLEH_ENRX 0x02
232#define OBOE_ENABLEH_CRC16 0x01
233
234#define OBOE_ENABLEL_BROADCAST 0x01
235
236#define OBOE_CURR_PCONFIGL OBOE_REG(0x16) /*Current config */
237#define OBOE_CURR_PCONFIGH OBOE_REG(0x17)
238
239#define OBOE_NEW_PCONFIGL OBOE_REG(0x18)
240#define OBOE_NEW_PCONFIGH OBOE_REG(0x19)
241
242#define OBOE_PCONFIGH_BAUDMASK 0xfc
243#define OBOE_PCONFIGH_WIDTHMASK 0x04
244#define OBOE_PCONFIGL_WIDTHMASK 0xe0
245#define OBOE_PCONFIGL_PREAMBLEMASK 0x1f
246
247#define OBOE_PCONFIG_BAUDMASK 0xfc00
248#define OBOE_PCONFIG_BAUDSHIFT 10
249#define OBOE_PCONFIG_WIDTHMASK 0x04e0
250#define OBOE_PCONFIG_WIDTHSHIFT 5
251#define OBOE_PCONFIG_PREAMBLEMASK 0x001f
252#define OBOE_PCONFIG_PREAMBLESHIFT 0
253
254#define OBOE_MAXLENL OBOE_REG(0x1a)
255#define OBOE_MAXLENH OBOE_REG(0x1b)
256
257#define OBOE_RXCOUNTH OBOE_REG(0x1c) /*Reset on receipt */
258#define OBOE_RXCOUNTL OBOE_REG(0x1d) /*of whole packet */
259
260/* The PCI ID of the OBOE chip */
261#ifndef PCI_DEVICE_ID_FIR701
262#define PCI_DEVICE_ID_FIR701 0x0701
263#endif
264
265#ifndef PCI_DEVICE_ID_FIRD01
266#define PCI_DEVICE_ID_FIRD01 0x0d01
267#endif
268
269struct OboeSlot
270{
271  __u16 len;                    /*Twelve bits of packet length */
272 __u8 unused;
273 __u8 control; /*Slot control/status see below */
274 __u32 address; /*Slot buffer address */
275}
276__attribute__ ((packed));
277
278#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
279
280struct OboeRing
281{
282 struct OboeSlot rx[OBOE_NTASKS];
283 struct OboeSlot tx[OBOE_NTASKS];
284};
285
286#define OBOE_RING_LEN (sizeof(struct OboeRing))
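/* With OBOE_NTASKS (64) rx and tx slots of 8 packed bytes each this is */
/* 1024 bytes, matching the 1kB (A9-A0 zero) alignment required for the */
/* ring base registers */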
287
288
289#define OBOE_CTL_TX_HW_OWNS 0x80 /*W/R This slot owned by the hardware */
290#define OBOE_CTL_TX_DISTX_CRC 0x40 /*W Disable CRC generation for [FM]IR */
291#define OBOE_CTL_TX_BAD_CRC 0x20 /*W Generate bad CRC */
292#define OBOE_CTL_TX_SIP 0x10 /*W Generate an SIP after transmission */
293#define OBOE_CTL_TX_MKUNDER 0x08 /*W Generate an underrun error */
294#define OBOE_CTL_TX_RTCENTX 0x04 /*W Enable receiver and generate TXdone */
295 /* After this slot is processed */
296#define OBOE_CTL_TX_UNDER 0x01 /*R Set by hardware to indicate underrun */
297
298
299#define OBOE_CTL_RX_HW_OWNS 0x80 /*W/R This slot owned by hardware */
300#define OBOE_CTL_RX_PHYERR 0x40 /*R Decoder error on reception */
301#define OBOE_CTL_RX_CRCERR 0x20 /*R CRC error only set for [FM]IR */
302#define OBOE_CTL_RX_LENGTH 0x10 /*R Packet > max Rx length */
303#define OBOE_CTL_RX_OVER 0x08 /*R set to indicate an overflow */
304#define OBOE_CTL_RX_SIRBAD 0x04 /*R SIR had BOF in packet or ABORT sequence */
305#define OBOE_CTL_RX_RXEOF 0x02 /*R Finished receiving on this slot */
306
307
308struct toshoboe_cb
309{
310 struct net_device *netdev; /* Yes! we are some kind of netdevice */
311 struct net_device_stats stats;
312 struct tty_driver ttydev;
313
314  struct irlap_cb *irlap;      /* The link layer we are bound to */
315
316 chipio_t io; /* IrDA controller information */
317 struct qos_info qos; /* QoS capabilities for this device */
318
319 __u32 flags; /* Interface flags */
320
321 struct pci_dev *pdev; /*PCI device */
322 int base; /*IO base */
323
324
325 int txpending; /*how many tx's are pending */
326 int txs, rxs; /*Which slots are we at */
327
328 int irdad; /*Driver under control of netdev end */
329 int async; /*Driver under control of async end */
330
331
332 int stopped; /*Stopped by some or other APM stuff */
333
334 int filter; /*In SIR mode do we want to receive
335 frames or byte ranges */
336
337 void *ringbuf; /*The ring buffer */
338 struct OboeRing *ring; /*The ring */
339
340 void *tx_bufs[OBOE_RING_MAX_SIZE]; /*The buffers */
341 void *rx_bufs[OBOE_RING_MAX_SIZE];
342
343
344 int speed; /*Current setting of the speed */
345 int new_speed; /*Set to request a speed change */
346
347/* The spinlock protects critical parts of the driver.
348 * Locking is done like this :
349 * spin_lock_irqsave(&self->spinlock, flags);
350 * Releasing the lock :
351 * spin_unlock_irqrestore(&self->spinlock, flags);
352 */
353 spinlock_t spinlock;
354 /* Used for the probe and diagnostics code */
355 int int_rx;
356 int int_tx;
357 int int_txunder;
358 int int_rxover;
359 int int_sip;
360};
361
362
363#endif
diff --git a/drivers/net/irda/ep7211_ir.c b/drivers/net/irda/ep7211_ir.c
new file mode 100644
index 000000000000..31896262d21c
--- /dev/null
+++ b/drivers/net/irda/ep7211_ir.c
@@ -0,0 +1,122 @@
1/*
2 * IR port driver for the Cirrus Logic EP7211 processor.
3 *
4 * Copyright 2001, Blue Mug Inc. All rights reserved.
5 */
6
7#include <linux/module.h>
8#include <linux/delay.h>
9#include <linux/tty.h>
10#include <linux/init.h>
11
12#include <net/irda/irda.h>
13#include <net/irda/irda_device.h>
14
15#include <asm/io.h>
16#include <asm/hardware.h>
17
18#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
19#define MAX_DELAY 10000 /* 1 ms */
20
21static void ep7211_ir_open(dongle_t *self, struct qos_info *qos);
22static void ep7211_ir_close(dongle_t *self);
23static int ep7211_ir_change_speed(struct irda_task *task);
24static int ep7211_ir_reset(struct irda_task *task);
25
26static struct dongle_reg dongle = {
27 .type = IRDA_EP7211_IR,
28 .open = ep7211_ir_open,
29 .close = ep7211_ir_close,
30 .reset = ep7211_ir_reset,
31 .change_speed = ep7211_ir_change_speed,
32 .owner = THIS_MODULE,
33};
34
35static void ep7211_ir_open(dongle_t *self, struct qos_info *qos)
36{
37 unsigned int syscon1, flags;
38
39 save_flags(flags); cli();
40
41 /* Turn on the SIR encoder. */
42 syscon1 = clps_readl(SYSCON1);
43 syscon1 |= SYSCON1_SIREN;
44 clps_writel(syscon1, SYSCON1);
45
46 /* XXX: We should disable modem status interrupts on the first
47 UART (interrupt #14). */
48
49 restore_flags(flags);
50}
51
52static void ep7211_ir_close(dongle_t *self)
53{
54 unsigned int syscon1, flags;
55
56 save_flags(flags); cli();
57
58 /* Turn off the SIR encoder. */
59 syscon1 = clps_readl(SYSCON1);
60 syscon1 &= ~SYSCON1_SIREN;
61 clps_writel(syscon1, SYSCON1);
62
63 /* XXX: If we've disabled the modem status interrupts, we should
64 reset them back to their original state. */
65
66 restore_flags(flags);
67}
68
69/*
70 * Function ep7211_ir_change_speed (task)
71 *
72 * Change speed of the EP7211 I/R port. We don't really have to do anything
73 * for the EP7211 as long as the rate is being changed at the serial port
74 * level.
75 */
76static int ep7211_ir_change_speed(struct irda_task *task)
77{
78 irda_task_next_state(task, IRDA_TASK_DONE);
79 return 0;
80}
81
82/*
83 * Function ep7211_ir_reset (task)
84 *
85 * Reset the EP7211 I/R. We don't really have to do anything.
86 *
87 */
88static int ep7211_ir_reset(struct irda_task *task)
89{
90 irda_task_next_state(task, IRDA_TASK_DONE);
91 return 0;
92}
93
94/*
95 * Function ep7211_ir_init(void)
96 *
97 * Initialize EP7211 I/R module
98 *
99 */
100static int __init ep7211_ir_init(void)
101{
102 return irda_device_register_dongle(&dongle);
103}
104
105/*
106 * Function ep7211_ir_cleanup(void)
107 *
108 * Cleanup EP7211 I/R module
109 *
110 */
111static void __exit ep7211_ir_cleanup(void)
112{
113 irda_device_unregister_dongle(&dongle);
114}
115
116MODULE_AUTHOR("Jon McClintock <jonm@bluemug.com>");
117MODULE_DESCRIPTION("EP7211 I/R driver");
118MODULE_LICENSE("GPL");
119MODULE_ALIAS("irda-dongle-8"); /* IRDA_EP7211_IR */
120
121module_init(ep7211_ir_init);
122module_exit(ep7211_ir_cleanup);
diff --git a/drivers/net/irda/esi-sir.c b/drivers/net/irda/esi-sir.c
new file mode 100644
index 000000000000..a908df7c4b9d
--- /dev/null
+++ b/drivers/net/irda/esi-sir.c
@@ -0,0 +1,159 @@
1/*********************************************************************
2 *
3 * Filename: esi.c
4 * Version: 1.6
5 * Description: Driver for the Extended Systems JetEye PC dongle
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sat Feb 21 18:54:38 1998
9 * Modified at: Sun Oct 27 22:01:04 2002
10 * Modified by: Martin Diehl <mad@mdiehl.de>
11 *
12 * Copyright (c) 1999 Dag Brattli, <dagb@cs.uit.no>,
13 * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
14 * Copyright (c) 2002 Martin Diehl, <mad@mdiehl.de>,
15 * All Rights Reserved.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
30 * MA 02111-1307 USA
31 *
32 ********************************************************************/
33
34#include <linux/module.h>
35#include <linux/delay.h>
36#include <linux/init.h>
37
38#include <net/irda/irda.h>
39
40#include "sir-dev.h"
41
42static int esi_open(struct sir_dev *);
43static int esi_close(struct sir_dev *);
44static int esi_change_speed(struct sir_dev *, unsigned);
45static int esi_reset(struct sir_dev *);
46
47static struct dongle_driver esi = {
48 .owner = THIS_MODULE,
49 .driver_name = "JetEye PC ESI-9680 PC",
50 .type = IRDA_ESI_DONGLE,
51 .open = esi_open,
52 .close = esi_close,
53 .reset = esi_reset,
54 .set_speed = esi_change_speed,
55};
56
57static int __init esi_sir_init(void)
58{
59 return irda_register_dongle(&esi);
60}
61
62static void __exit esi_sir_cleanup(void)
63{
64 irda_unregister_dongle(&esi);
65}
66
67static int esi_open(struct sir_dev *dev)
68{
69 struct qos_info *qos = &dev->qos;
70
71 /* Power up and set dongle to 9600 baud */
72 sirdev_set_dtr_rts(dev, FALSE, TRUE);
73
74 qos->baud_rate.bits &= IR_9600|IR_19200|IR_115200;
75 qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
76 irda_qos_bits_to_value(qos);
77
78 /* irda thread waits 50 msec for power settling */
79
80 return 0;
81}
82
83static int esi_close(struct sir_dev *dev)
84{
85 /* Power off dongle */
86 sirdev_set_dtr_rts(dev, FALSE, FALSE);
87
88 return 0;
89}
90
91/*
92 * Function esi_change_speed (task)
93 *
94 * Set the speed for the Extended Systems JetEye PC ESI-9680 type dongle
95 * Apparently (see old esi-driver) no delays are needed here...
96 *
97 */
98static int esi_change_speed(struct sir_dev *dev, unsigned speed)
99{
100 int ret = 0;
101 int dtr, rts;
102
103 switch (speed) {
104 case 19200:
105 dtr = TRUE;
106 rts = FALSE;
107 break;
108 case 115200:
109 dtr = rts = TRUE;
110 break;
111 default:
112 ret = -EINVAL;
113 speed = 9600;
114 /* fall through */
115 case 9600:
116 dtr = FALSE;
117 rts = TRUE;
118 break;
119 }
120
121 /* Change speed of dongle */
122 sirdev_set_dtr_rts(dev, dtr, rts);
123 dev->speed = speed;
124
125 return ret;
126}
127
128/*
129 * Function esi_reset (task)
130 *
131 * Reset dongle;
132 *
133 */
134static int esi_reset(struct sir_dev *dev)
135{
136 sirdev_set_dtr_rts(dev, FALSE, FALSE);
137
138 /* Hm, the old esi-driver left the dongle unpowered relying on
139 * the following speed change to repower. This might work for
140 * the esi because we only need the modem lines. However, now the
141 * general rule is reset must bring the dongle to some working
142 * well-known state because speed change might write to registers.
143 * The old esi-driver didn't add any delay here - let's hope it's fine.
144 */
145
146 sirdev_set_dtr_rts(dev, FALSE, TRUE);
147 dev->speed = 9600;
148
149 return 0;
150}
151
152MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
153MODULE_DESCRIPTION("Extended Systems JetEye PC dongle driver");
154MODULE_LICENSE("GPL");
155MODULE_ALIAS("irda-dongle-1"); /* IRDA_ESI_DONGLE */
156
157module_init(esi_sir_init);
158module_exit(esi_sir_cleanup);
159
diff --git a/drivers/net/irda/esi.c b/drivers/net/irda/esi.c
new file mode 100644
index 000000000000..d3a61af6402d
--- /dev/null
+++ b/drivers/net/irda/esi.c
@@ -0,0 +1,149 @@
1/*********************************************************************
2 *
3 * Filename: esi.c
4 * Version: 1.5
5 * Description: Driver for the Extended Systems JetEye PC dongle
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sat Feb 21 18:54:38 1998
9 * Modified at: Fri Dec 17 09:14:04 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999 Dag Brattli, <dagb@cs.uit.no>,
13 * Copyright (c) 1998 Thomas Davis, <ratbert@radiks.net>,
14 * All Rights Reserved.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 * MA 02111-1307 USA
30 *
31 ********************************************************************/
32
33#include <linux/module.h>
34#include <linux/delay.h>
35#include <linux/tty.h>
36#include <linux/init.h>
37
38#include <net/irda/irda.h>
39#include <net/irda/irda_device.h>
40
41static void esi_open(dongle_t *self, struct qos_info *qos);
42static void esi_close(dongle_t *self);
43static int esi_change_speed(struct irda_task *task);
44static int esi_reset(struct irda_task *task);
45
46static struct dongle_reg dongle = {
47 .type = IRDA_ESI_DONGLE,
48 .open = esi_open,
49 .close = esi_close,
50 .reset = esi_reset,
51 .change_speed = esi_change_speed,
52 .owner = THIS_MODULE,
53};
54
55static int __init esi_init(void)
56{
57 return irda_device_register_dongle(&dongle);
58}
59
60static void __exit esi_cleanup(void)
61{
62 irda_device_unregister_dongle(&dongle);
63}
64
65static void esi_open(dongle_t *self, struct qos_info *qos)
66{
67 qos->baud_rate.bits &= IR_9600|IR_19200|IR_115200;
68 qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
69}
70
71static void esi_close(dongle_t *dongle)
72{
73 /* Power off dongle */
74 dongle->set_dtr_rts(dongle->dev, FALSE, FALSE);
75}
76
77/*
78 * Function esi_change_speed (task)
79 *
80 * Set the speed for the Extended Systems JetEye PC ESI-9680 type dongle
81 *
82 */
83static int esi_change_speed(struct irda_task *task)
84{
85 dongle_t *self = (dongle_t *) task->instance;
86 __u32 speed = (__u32) task->param;
87 int dtr, rts;
88
89 switch (speed) {
90 case 19200:
91 dtr = TRUE;
92 rts = FALSE;
93 break;
94 case 115200:
95 dtr = rts = TRUE;
96 break;
97 case 9600:
98 default:
99 dtr = FALSE;
100 rts = TRUE;
101 break;
102 }
103
104 /* Change speed of dongle */
105 self->set_dtr_rts(self->dev, dtr, rts);
106 self->speed = speed;
107
108 irda_task_next_state(task, IRDA_TASK_DONE);
109
110 return 0;
111}
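
/*
 * Summary of the modem-line encoding used above (derived from the switch
 * in esi_change_speed(), not from a datasheet):
 *
 *	speed	DTR	RTS
 *	 9600	low	high	(default)
 *	19200	high	low
 *	115200	high	high
 */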
112
113/*
114 * Function esi_reset (task)
115 *
116 * Reset dongle;
117 *
118 */
119static int esi_reset(struct irda_task *task)
120{
121 dongle_t *self = (dongle_t *) task->instance;
122
123 self->set_dtr_rts(self->dev, FALSE, FALSE);
124 irda_task_next_state(task, IRDA_TASK_DONE);
125
126 return 0;
127}
128
129MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
130MODULE_DESCRIPTION("Extended Systems JetEye PC dongle driver");
131MODULE_LICENSE("GPL");
132MODULE_ALIAS("irda-dongle-1"); /* IRDA_ESI_DONGLE */
133
134/*
135 * Function init_module (void)
136 *
137 * Initialize ESI module
138 *
139 */
140module_init(esi_init);
141
142/*
143 * Function cleanup_module (void)
144 *
145 * Cleanup ESI module
146 *
147 */
148module_exit(esi_cleanup);
149
diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c
new file mode 100644
index 000000000000..0d2fe87fb9b7
--- /dev/null
+++ b/drivers/net/irda/girbil-sir.c
@@ -0,0 +1,258 @@
1/*********************************************************************
2 *
3 * Filename: girbil.c
4 * Version: 1.2
5 * Description: Implementation for the Greenwich GIrBIL dongle
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sat Feb 6 21:02:33 1999
9 * Modified at: Fri Dec 17 09:13:20 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge.
22 *
23 ********************************************************************/
24
25#include <linux/module.h>
26#include <linux/delay.h>
27#include <linux/init.h>
28
29#include <net/irda/irda.h>
30
31#include "sir-dev.h"
32
33static int girbil_reset(struct sir_dev *dev);
34static int girbil_open(struct sir_dev *dev);
35static int girbil_close(struct sir_dev *dev);
36static int girbil_change_speed(struct sir_dev *dev, unsigned speed);
37
38/* Control register 1 */
39#define GIRBIL_TXEN 0x01 /* Enable transmitter */
40#define GIRBIL_RXEN 0x02 /* Enable receiver */
41#define GIRBIL_ECAN 0x04 /* Cancel self emitted data */
42#define GIRBIL_ECHO 0x08 /* Echo control characters */
43
44/* LED Current Register (0x2) */
45#define GIRBIL_HIGH 0x20
46#define GIRBIL_MEDIUM 0x21
47#define GIRBIL_LOW 0x22
48
49/* Baud register (0x3) */
50#define GIRBIL_2400 0x30
51#define GIRBIL_4800 0x31
52#define GIRBIL_9600 0x32
53#define GIRBIL_19200 0x33
54#define GIRBIL_38400 0x34
55#define GIRBIL_57600 0x35
56#define GIRBIL_115200 0x36
57
58/* Mode register (0x4) */
59#define GIRBIL_IRDA 0x40
60#define GIRBIL_ASK 0x41
61
62/* Control register 2 (0x5) */
63#define GIRBIL_LOAD 0x51 /* Load the new baud rate value */
64
65static struct dongle_driver girbil = {
66 .owner = THIS_MODULE,
67 .driver_name = "Greenwich GIrBIL",
68 .type = IRDA_GIRBIL_DONGLE,
69 .open = girbil_open,
70 .close = girbil_close,
71 .reset = girbil_reset,
72 .set_speed = girbil_change_speed,
73};
74
75static int __init girbil_sir_init(void)
76{
77 return irda_register_dongle(&girbil);
78}
79
80static void __exit girbil_sir_cleanup(void)
81{
82 irda_unregister_dongle(&girbil);
83}
84
85static int girbil_open(struct sir_dev *dev)
86{
87 struct qos_info *qos = &dev->qos;
88
89 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
90
91 /* Power on dongle */
92 sirdev_set_dtr_rts(dev, TRUE, TRUE);
93
94 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
95 qos->min_turn_time.bits = 0x03;
96 irda_qos_bits_to_value(qos);
97
98 /* irda thread waits 50 msec for power settling */
99
100 return 0;
101}
102
103static int girbil_close(struct sir_dev *dev)
104{
105 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
106
107 /* Power off dongle */
108 sirdev_set_dtr_rts(dev, FALSE, FALSE);
109
110 return 0;
111}
112
113/*
114 * Function girbil_change_speed (dev, speed)
115 *
116 * Set the speed for the Girbil type dongle.
117 *
118 */
119
120#define GIRBIL_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
121
122static int girbil_change_speed(struct sir_dev *dev, unsigned speed)
123{
124 unsigned state = dev->fsm.substate;
125 unsigned delay = 0;
126 u8 control[2];
127 static int ret = 0;
128
129 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
130
131 /* dongle already reset - port and dongle at default speed */
132
133 switch(state) {
134
135 case SIRDEV_STATE_DONGLE_SPEED:
136
137 /* Set DTR and Clear RTS to enter command mode */
138 sirdev_set_dtr_rts(dev, FALSE, TRUE);
139
140 udelay(25); /* better wait a little while */
141
142 ret = 0;
143 switch (speed) {
144 default:
145 ret = -EINVAL;
146 /* fall through */
147 case 9600:
148 control[0] = GIRBIL_9600;
149 break;
150 case 19200:
151 control[0] = GIRBIL_19200;
152 break;
153 case 38400:
154 control[0] = GIRBIL_38400;
155 break;
156 case 57600:
157 control[0] = GIRBIL_57600;
158 break;
159 case 115200:
160 control[0] = GIRBIL_115200;
161 break;
162 }
163 control[1] = GIRBIL_LOAD;
164
165 /* Write control bytes */
166 sirdev_raw_write(dev, control, 2);
167
168 dev->speed = speed;
169
170 state = GIRBIL_STATE_WAIT_SPEED;
171 delay = 100;
172 break;
173
174 case GIRBIL_STATE_WAIT_SPEED:
175 /* Go back to normal mode */
176 sirdev_set_dtr_rts(dev, TRUE, TRUE);
177
178 udelay(25); /* better wait a little while */
179 break;
180
181 default:
182 IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state);
183 ret = -EINVAL;
184 break;
185 }
186 dev->fsm.substate = state;
187 return (delay > 0) ? delay : ret;
188}
189
190/*
191 * Function girbil_reset (driver)
192 *
193 * This function resets the girbil dongle.
194 *
195 * Algorithm:
196 * 0. assert reset via the modem control lines, wait at least 5 ms
197 * 1. enter command mode, write control byte (TXEN|RXEN), return to normal mode at 9600
198 */
199
200
201#define GIRBIL_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET + 1)
202#define GIRBIL_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET + 2)
203#define GIRBIL_STATE_WAIT3_RESET (SIRDEV_STATE_DONGLE_RESET + 3)
204
205static int girbil_reset(struct sir_dev *dev)
206{
207 unsigned state = dev->fsm.substate;
208 unsigned delay = 0;
209 u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
210 int ret = 0;
211
212 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
213
214 switch (state) {
215 case SIRDEV_STATE_DONGLE_RESET:
216 /* Reset dongle */
217 sirdev_set_dtr_rts(dev, TRUE, FALSE);
218 /* Sleep at least 5 ms */
219 delay = 20;
220 state = GIRBIL_STATE_WAIT1_RESET;
221 break;
222
223 case GIRBIL_STATE_WAIT1_RESET:
224 /* Set DTR and clear RTS to enter command mode */
225 sirdev_set_dtr_rts(dev, FALSE, TRUE);
226 delay = 20;
227 state = GIRBIL_STATE_WAIT2_RESET;
228 break;
229
230 case GIRBIL_STATE_WAIT2_RESET:
231 /* Write control byte */
232 sirdev_raw_write(dev, &control, 1);
233 delay = 20;
234 state = GIRBIL_STATE_WAIT3_RESET;
235 break;
236
237 case GIRBIL_STATE_WAIT3_RESET:
238 /* Go back to normal mode */
239 sirdev_set_dtr_rts(dev, TRUE, TRUE);
240 dev->speed = 9600;
241 break;
242
243 default:
244 IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state);
245 ret = -1;
246 break;
247 }
248 dev->fsm.substate = state;
249 return (delay > 0) ? delay : ret;
250}
251
252MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
253MODULE_DESCRIPTION("Greenwich GIrBIL dongle driver");
254MODULE_LICENSE("GPL");
255MODULE_ALIAS("irda-dongle-4"); /* IRDA_GIRBIL_DONGLE */
256
257module_init(girbil_sir_init);
258module_exit(girbil_sir_cleanup);
diff --git a/drivers/net/irda/girbil.c b/drivers/net/irda/girbil.c
new file mode 100644
index 000000000000..248aeb0c726c
--- /dev/null
+++ b/drivers/net/irda/girbil.c
@@ -0,0 +1,250 @@
1/*********************************************************************
2 *
3 * Filename: girbil.c
4 * Version: 1.2
5 * Description: Implementation for the Greenwich GIrBIL dongle
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sat Feb 6 21:02:33 1999
9 * Modified at: Fri Dec 17 09:13:20 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge.
22 *
23 ********************************************************************/
24
25#include <linux/module.h>
26#include <linux/delay.h>
27#include <linux/tty.h>
28#include <linux/init.h>
29
30#include <net/irda/irda.h>
31#include <net/irda/irda_device.h>
32
33static int girbil_reset(struct irda_task *task);
34static void girbil_open(dongle_t *self, struct qos_info *qos);
35static void girbil_close(dongle_t *self);
36static int girbil_change_speed(struct irda_task *task);
37
38/* Control register 1 */
39#define GIRBIL_TXEN 0x01 /* Enable transmitter */
40#define GIRBIL_RXEN 0x02 /* Enable receiver */
41#define GIRBIL_ECAN 0x04 /* Cancel self emitted data */
42#define GIRBIL_ECHO 0x08 /* Echo control characters */
43
44/* LED Current Register (0x2) */
45#define GIRBIL_HIGH 0x20
46#define GIRBIL_MEDIUM 0x21
47#define GIRBIL_LOW 0x22
48
49/* Baud register (0x3) */
50#define GIRBIL_2400 0x30
51#define GIRBIL_4800 0x31
52#define GIRBIL_9600 0x32
53#define GIRBIL_19200 0x33
54#define GIRBIL_38400 0x34
55#define GIRBIL_57600 0x35
56#define GIRBIL_115200 0x36
57
58/* Mode register (0x4) */
59#define GIRBIL_IRDA 0x40
60#define GIRBIL_ASK 0x41
61
62/* Control register 2 (0x5) */
63#define GIRBIL_LOAD 0x51 /* Load the new baud rate value */
64
65static struct dongle_reg dongle = {
66 .type = IRDA_GIRBIL_DONGLE,
67 .open = girbil_open,
68 .close = girbil_close,
69 .reset = girbil_reset,
70 .change_speed = girbil_change_speed,
71 .owner = THIS_MODULE,
72};
73
74static int __init girbil_init(void)
75{
76 return irda_device_register_dongle(&dongle);
77}
78
79static void __exit girbil_cleanup(void)
80{
81 irda_device_unregister_dongle(&dongle);
82}
83
84static void girbil_open(dongle_t *self, struct qos_info *qos)
85{
86 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
87 qos->min_turn_time.bits = 0x03;
88}
89
90static void girbil_close(dongle_t *self)
91{
92 /* Power off dongle */
93 self->set_dtr_rts(self->dev, FALSE, FALSE);
94}
95
96/*
97 * Function girbil_change_speed (dev, speed)
98 *
99 * Set the speed for the Girbil type dongle.
100 *
101 */
102static int girbil_change_speed(struct irda_task *task)
103{
104 dongle_t *self = (dongle_t *) task->instance;
105 __u32 speed = (__u32) task->param;
106 __u8 control[2];
107 int ret = 0;
108
109 self->speed_task = task;
110
111 switch (task->state) {
112 case IRDA_TASK_INIT:
113 /* Need to reset the dongle and go to 9600 bps before
114 programming */
115 if (irda_task_execute(self, girbil_reset, NULL, task,
116 (void *) speed))
117 {
118 /* Dongle need more time to reset */
119 irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
120
121 /* Give reset 1 sec to finish */
122 ret = msecs_to_jiffies(1000);
123 }
124 break;
125 case IRDA_TASK_CHILD_WAIT:
126 IRDA_WARNING("%s(), resetting dongle timed out!\n",
127 __FUNCTION__);
128 ret = -1;
129 break;
130 case IRDA_TASK_CHILD_DONE:
131 /* Set DTR and Clear RTS to enter command mode */
132 self->set_dtr_rts(self->dev, FALSE, TRUE);
133
134 switch (speed) {
135 case 9600:
136 default:
137 control[0] = GIRBIL_9600;
138 break;
139 case 19200:
140 control[0] = GIRBIL_19200;
141 break;
142 case 38400:
143 control[0] = GIRBIL_38400;
144 break;
145 case 57600:
146 control[0] = GIRBIL_57600;
147 break;
148 case 115200:
149 control[0] = GIRBIL_115200;
150 break;
151 }
152 control[1] = GIRBIL_LOAD;
153
154 /* Write control bytes */
155 self->write(self->dev, control, 2);
156 irda_task_next_state(task, IRDA_TASK_WAIT);
157 ret = msecs_to_jiffies(100);
158 break;
159 case IRDA_TASK_WAIT:
160 /* Go back to normal mode */
161 self->set_dtr_rts(self->dev, TRUE, TRUE);
162 irda_task_next_state(task, IRDA_TASK_DONE);
163 self->speed_task = NULL;
164 break;
165 default:
166 IRDA_ERROR("%s(), unknown state %d\n",
167 __FUNCTION__, task->state);
168 irda_task_next_state(task, IRDA_TASK_DONE);
169 self->speed_task = NULL;
170 ret = -1;
171 break;
172 }
173 return ret;
174}
175
176/*
177 * Function girbil_reset (driver)
178 *
179 * This function resets the girbil dongle.
180 *
181 * Algorithm:
182 * 0. assert reset via the modem control lines, wait at least 5 ms
183 * 1. enter command mode, write control byte (TXEN|RXEN), return to normal mode
184 */
185static int girbil_reset(struct irda_task *task)
186{
187 dongle_t *self = (dongle_t *) task->instance;
188 __u8 control = GIRBIL_TXEN | GIRBIL_RXEN;
189 int ret = 0;
190
191 self->reset_task = task;
192
193 switch (task->state) {
194 case IRDA_TASK_INIT:
195 /* Reset dongle */
196 self->set_dtr_rts(self->dev, TRUE, FALSE);
197 irda_task_next_state(task, IRDA_TASK_WAIT1);
198 /* Sleep at least 5 ms */
199 ret = msecs_to_jiffies(20);
200 break;
201 case IRDA_TASK_WAIT1:
202 /* Set DTR and clear RTS to enter command mode */
203 self->set_dtr_rts(self->dev, FALSE, TRUE);
204 irda_task_next_state(task, IRDA_TASK_WAIT2);
205 ret = msecs_to_jiffies(20);
206 break;
207 case IRDA_TASK_WAIT2:
208 /* Write control byte */
209 self->write(self->dev, &control, 1);
210 irda_task_next_state(task, IRDA_TASK_WAIT3);
211 ret = msecs_to_jiffies(20);
212 break;
213 case IRDA_TASK_WAIT3:
214 /* Go back to normal mode */
215 self->set_dtr_rts(self->dev, TRUE, TRUE);
216 irda_task_next_state(task, IRDA_TASK_DONE);
217 self->reset_task = NULL;
218 break;
219 default:
220 IRDA_ERROR("%s(), unknown state %d\n",
221 __FUNCTION__, task->state);
222 irda_task_next_state(task, IRDA_TASK_DONE);
223 self->reset_task = NULL;
224 ret = -1;
225 break;
226 }
227 return ret;
228}
229
230MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
231MODULE_DESCRIPTION("Greenwich GIrBIL dongle driver");
232MODULE_LICENSE("GPL");
233MODULE_ALIAS("irda-dongle-4"); /* IRDA_GIRBIL_DONGLE */
234
235/*
236 * Function init_module (void)
237 *
238 * Initialize Girbil module
239 *
240 */
241module_init(girbil_init);
242
243/*
244 * Function cleanup_module (void)
245 *
246 * Cleanup Girbil module
247 *
248 */
249module_exit(girbil_cleanup);
250
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
new file mode 100644
index 000000000000..46e0022d3258
--- /dev/null
+++ b/drivers/net/irda/irda-usb.c
@@ -0,0 +1,1602 @@
1/*****************************************************************************
2 *
3 * Filename: irda-usb.c
4 * Version: 0.9b
5 * Description: IrDA-USB Driver
6 * Status: Experimental
7 * Author: Dag Brattli <dag@brattli.net>
8 *
9 * Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
10 * Copyright (C) 2001, Dag Brattli <dag@brattli.net>
11 * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 *****************************************************************************/
28
29/*
30 * IMPORTANT NOTE
31 * --------------
32 *
33 * As of kernel 2.5.20, this is the state of compliance and testing of
34 * this driver (irda-usb) with regards to the USB low level drivers...
35 *
36 * This driver has been tested SUCCESSFULLY with the following drivers :
37 * o usb-uhci-hcd (For Intel/Via USB controllers)
38 * o uhci-hcd (Alternate/JE driver for Intel/Via USB controllers)
39 * o ohci-hcd (For other USB controllers)
40 *
41 * This driver has NOT been tested with the following drivers :
42 * o ehci-hcd (USB 2.0 controllers)
43 *
44 * Note that all HCD drivers do URB_ZERO_PACKET and timeout properly,
45 * so we don't have to worry about that anymore.
46 * One common problem is the failure to set the address on the dongle,
47 * but this happens before the driver gets loaded...
48 *
49 * Jean II
50 */
51
52/*------------------------------------------------------------------*/
53
54#include <linux/module.h>
55#include <linux/moduleparam.h>
56#include <linux/kernel.h>
57#include <linux/types.h>
58#include <linux/init.h>
59#include <linux/skbuff.h>
60#include <linux/netdevice.h>
61#include <linux/slab.h>
62#include <linux/rtnetlink.h>
63#include <linux/usb.h>
64
65#include "irda-usb.h"
66
67/*------------------------------------------------------------------*/
68
69static int qos_mtt_bits = 0;
70
71/* These are the currently known IrDA USB dongles. Add new dongles here */
72static struct usb_device_id dongles[] = {
73 /* ACTiSYS Corp., ACT-IR2000U FIR-USB Adapter */
74 { USB_DEVICE(0x9c4, 0x011), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
75 /* Looks like ACTiSYS, reported as : IBM Corp., IBM UltraPort IrDA */
76 { USB_DEVICE(0x4428, 0x012), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
77 /* KC Technology Inc., KC-180 USB IrDA Device */
78 { USB_DEVICE(0x50f, 0x180), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
79 /* Extended Systems, Inc., XTNDAccess IrDA USB (ESI-9685) */
80 { USB_DEVICE(0x8e9, 0x100), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW },
81 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
82 USB_DEVICE_ID_MATCH_INT_SUBCLASS,
83 .bInterfaceClass = USB_CLASS_APP_SPEC,
84 .bInterfaceSubClass = USB_CLASS_IRDA,
85 .driver_info = IUC_DEFAULT, },
86 { }, /* The end */
87};
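
/*
 * To add a new dongle, append an entry above the catch-all class match,
 * for example (hypothetical vendor/product IDs, shown for illustration only):
 *
 *	{ USB_DEVICE(0x1234, 0x5678), .driver_info = IUC_DEFAULT },
 *
 * and set whatever IUC_* quirk flags the hardware needs in driver_info.
 */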
88
89/*
90 * Important note :
91 * Devices based on the SigmaTel chipset (0x66f, 0x4200) are not designed
92 * using the "USB-IrDA specification" (yes, there exists such a thing), and
93 * are therefore not supported by this driver (don't add them above).
94 * There is a Linux driver, stir4200, that supports those USB devices.
95 * Jean II
96 */
97
98MODULE_DEVICE_TABLE(usb, dongles);
99
100/*------------------------------------------------------------------*/
101
102static struct irda_class_desc *irda_usb_find_class_desc(struct usb_interface *intf);
103static void irda_usb_disconnect(struct usb_interface *intf);
104static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self);
105static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *dev);
106static int irda_usb_open(struct irda_usb_cb *self);
107static void irda_usb_close(struct irda_usb_cb *self);
108static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs);
109static void write_bulk_callback(struct urb *urb, struct pt_regs *regs);
110static void irda_usb_receive(struct urb *urb, struct pt_regs *regs);
111static int irda_usb_net_open(struct net_device *dev);
112static int irda_usb_net_close(struct net_device *dev);
113static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
114static void irda_usb_net_timeout(struct net_device *dev);
115static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev);
116
117/************************ TRANSMIT ROUTINES ************************/
118/*
119 * Receive packets from the IrDA stack and send them on the USB pipe.
120 * Handle speed change, timeout and lots of ugliness...
121 */
122
123/*------------------------------------------------------------------*/
124/*
125 * Function irda_usb_build_header(self, header, force)
126 *
127 * Builds USB-IrDA outbound header
128 *
129 * When we send an IrDA frame over a USB pipe, we add to it a 1 byte
130 * header. This function creates this header with the proper values.
131 *
132 * Important note : the USB-IrDA spec 1.0 says very clearly in chapter 5.4.2.2
133 * that the setting of the link speed and xbof number in this outbound header
134 * should be applied *AFTER* the frame has been sent.
135 * Unfortunately, some devices are not compliant with that... It seems that
136 * reading the spec is far too difficult...
137 * Jean II
138 */
139static void irda_usb_build_header(struct irda_usb_cb *self,
140 __u8 *header,
141 int force)
142{
143 /* Set the negotiated link speed */
144 if (self->new_speed != -1) {
145 /* Hum... Ugly hack :-(
146 * Some devices are not compliant with the spec and change
147 * parameters *before* sending the frame. - Jean II
148 */
149 if ((self->capability & IUC_SPEED_BUG) &&
150 (!force) && (self->speed != -1)) {
151 /* No speed and xbofs change here
152 * (we'll do it later in the write callback) */
153 IRDA_DEBUG(2, "%s(), not changing speed yet\n", __FUNCTION__);
154 *header = 0;
155 return;
156 }
157
158 IRDA_DEBUG(2, "%s(), changing speed to %d\n", __FUNCTION__, self->new_speed);
159 self->speed = self->new_speed;
160 /* We will do ` self->new_speed = -1; ' in the completion
161 * handler just in case the current URB fails - Jean II */
162
163 switch (self->speed) {
164 case 2400:
165 *header = SPEED_2400;
166 break;
167 default:
168 case 9600:
169 *header = SPEED_9600;
170 break;
171 case 19200:
172 *header = SPEED_19200;
173 break;
174 case 38400:
175 *header = SPEED_38400;
176 break;
177 case 57600:
178 *header = SPEED_57600;
179 break;
180 case 115200:
181 *header = SPEED_115200;
182 break;
183 case 576000:
184 *header = SPEED_576000;
185 break;
186 case 1152000:
187 *header = SPEED_1152000;
188 break;
189 case 4000000:
190 *header = SPEED_4000000;
191 self->new_xbofs = 0;
192 break;
193 }
194 } else
195 /* No change */
196 *header = 0;
197
198 /* Set the negotiated additional XBOFS */
199 if (self->new_xbofs != -1) {
200 IRDA_DEBUG(2, "%s(), changing xbofs to %d\n", __FUNCTION__, self->new_xbofs);
201 self->xbofs = self->new_xbofs;
202 /* We will do ` self->new_xbofs = -1; ' in the completion
203 * handler just in case the current URB fails - Jean II */
204
205 switch (self->xbofs) {
206 case 48:
207 *header |= 0x10;
208 break;
209 case 28:
210 case 24: /* USB spec 1.0 says 24 */
211 *header |= 0x20;
212 break;
213 default:
214 case 12:
215 *header |= 0x30;
216 break;
217 case 5: /* Bug in IrLAP spec? (should be 6) */
218 case 6:
219 *header |= 0x40;
220 break;
221 case 3:
222 *header |= 0x50;
223 break;
224 case 2:
225 *header |= 0x60;
226 break;
227 case 1:
228 *header |= 0x70;
229 break;
230 case 0:
231 *header |= 0x80;
232 break;
233 }
234 }
235}
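
/*
 * Layout of the header byte, as implied by the switches above (see the
 * USB-IrDA class spec for the authoritative encoding): bits 0-3 carry the
 * SPEED_* code, bits 4-7 the additional-BOFs selector (0x10..0x80).
 * For example, 9600 baud with 12 additional BOFs gives SPEED_9600 | 0x30.
 */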
236
237/*------------------------------------------------------------------*/
238/*
239 * Send a command to change the speed of the dongle
240 * Need to be called with spinlock on.
241 */
242static void irda_usb_change_speed_xbofs(struct irda_usb_cb *self)
243{
244 __u8 *frame;
245 struct urb *urb;
246 int ret;
247
248 IRDA_DEBUG(2, "%s(), speed=%d, xbofs=%d\n", __FUNCTION__,
249 self->new_speed, self->new_xbofs);
250
251 /* Grab the speed URB */
252 urb = self->speed_urb;
253 if (urb->status != 0) {
254 IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__);
255 return;
256 }
257
258 /* Allocate the fake frame */
259 frame = self->speed_buff;
260
261 /* Set the new speed and xbofs in this fake frame */
262 irda_usb_build_header(self, frame, 1);
263
264 /* Submit the 0 length IrDA frame to trigger new speed settings */
265 usb_fill_bulk_urb(urb, self->usbdev,
266 usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
267 frame, IRDA_USB_SPEED_MTU,
268 speed_bulk_callback, self);
269 urb->transfer_buffer_length = USB_IRDA_HEADER;
270 urb->transfer_flags = URB_ASYNC_UNLINK;
271
272 /* Irq disabled -> GFP_ATOMIC */
273 if ((ret = usb_submit_urb(urb, GFP_ATOMIC))) {
274 IRDA_WARNING("%s(), failed Speed URB\n", __FUNCTION__);
275 }
276}
277
278/*------------------------------------------------------------------*/
279/*
280 * Speed URB callback
281 * Now, we can only get called for the speed URB.
282 */
283static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs)
284{
285 struct irda_usb_cb *self = urb->context;
286
287 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
288
289 /* We should always have a context */
290 IRDA_ASSERT(self != NULL, return;);
291 /* We should always be called for the speed URB */
292 IRDA_ASSERT(urb == self->speed_urb, return;);
293
294 /* Check for timeout and other USB nasties */
295 if (urb->status != 0) {
296 /* I get a lot of -ECONNABORTED = -103 here - Jean II */
297 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags);
298
299 /* Don't do anything here, that might confuse the USB layer.
300 * Instead, we will wait for irda_usb_net_timeout(), the
301 * network layer watchdog, to fix the situation.
302 * Jean II */
303 /* A reset of the dongle might be welcomed here - Jean II */
304 return;
305 }
306
307 /* urb is now available */
308 //urb->status = 0; -> tested above
309
310 /* New speed and xbofs are now committed in hardware */
311 self->new_speed = -1;
312 self->new_xbofs = -1;
313
314 /* Allow the stack to send more packets */
315 netif_wake_queue(self->netdev);
316}
317
318/*------------------------------------------------------------------*/
319/*
320 * Send an IrDA frame to the USB dongle (for transmission)
321 */
322static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
323{
324 struct irda_usb_cb *self = netdev->priv;
325 struct urb *urb = self->tx_urb;
326 unsigned long flags;
327 s32 speed;
328 s16 xbofs;
329 int res, mtt;
330 int err = 1; /* Failed */
331
332 IRDA_DEBUG(4, "%s() on %s\n", __FUNCTION__, netdev->name);
333
334 netif_stop_queue(netdev);
335
336 /* Protect us from USB callbacks, net watchdog and else. */
337 spin_lock_irqsave(&self->lock, flags);
338
339 /* Check if the device is still there.
340 * We need to check self->present under the spinlock because
341 * irda_usb_disconnect() is synchronous - Jean II */
342 if (!self->present) {
343 IRDA_DEBUG(0, "%s(), Device is gone...\n", __FUNCTION__);
344 goto drop;
345 }
346
347 /* Check if we need to change the number of xbofs */
348 xbofs = irda_get_next_xbofs(skb);
349 if ((xbofs != self->xbofs) && (xbofs != -1)) {
350 self->new_xbofs = xbofs;
351 }
352
353 /* Check if we need to change the speed */
354 speed = irda_get_next_speed(skb);
355 if ((speed != self->speed) && (speed != -1)) {
356 /* Set the desired speed */
357 self->new_speed = speed;
358
359 /* Check for empty frame */
360 if (!skb->len) {
361 /* IrLAP sends us an empty frame to make us change the
362 * speed. Changing speed with the USB adapter is in
363 * fact sending an empty frame to the adapter, so we
364 * could just let the present function do its job.
365 * However, we would wait for min turn time,
366 * do an extra memcpy and increment packet counters...
367 * Jean II */
368 irda_usb_change_speed_xbofs(self);
369 netdev->trans_start = jiffies;
370 /* Will netif_wake_queue() in callback */
371 err = 0; /* No error */
372 goto drop;
373 }
374 }
375
376 if (urb->status != 0) {
377 IRDA_WARNING("%s(), URB still in use!\n", __FUNCTION__);
378 goto drop;
379 }
380
381 /* Make sure there is room for IrDA-USB header. The actual
382 * allocation will be done lower in skb_push().
383 * Also, we don't use skb_cow() directly, because it requires
384 * headroom >= 16, which forces unnecessary copies - Jean II */
385 if (skb_headroom(skb) < USB_IRDA_HEADER) {
386 IRDA_DEBUG(0, "%s(), Insufficient skb headroom.\n", __FUNCTION__);
387 if (skb_cow(skb, USB_IRDA_HEADER)) {
388 IRDA_WARNING("%s(), failed skb_cow() !!!\n", __FUNCTION__);
389 goto drop;
390 }
391 }
392
393 /* Change setting for next frame */
394 irda_usb_build_header(self, skb_push(skb, USB_IRDA_HEADER), 0);
395
396 /* FIXME: Make macro out of this one */
397 ((struct irda_skb_cb *)skb->cb)->context = self;
398
399 usb_fill_bulk_urb(urb, self->usbdev,
400 usb_sndbulkpipe(self->usbdev, self->bulk_out_ep),
401 skb->data, IRDA_SKB_MAX_MTU,
402 write_bulk_callback, skb);
403 urb->transfer_buffer_length = skb->len;
404 /* Note : unlink *must* be Asynchronous because of the code in
405 * irda_usb_net_timeout() -> call in irq - Jean II */
406 urb->transfer_flags = URB_ASYNC_UNLINK;
407 /* This flag (URB_ZERO_PACKET) indicates that what we send is not
408 * a continuous stream of data but separate packets.
409 * In this case, the USB layer will insert an empty USB frame (TD)
410 * after each of our packets that is exact multiple of the frame size.
411 * This is how the dongle will detect the end of packet - Jean II */
412 urb->transfer_flags |= URB_ZERO_PACKET;
413
414 /* Generate min turn time. FIXME: can we do better than this? */
415 /* Trying to enforce a turnaround time at this level is like trying to
416 * measure a processor clock cycle with a wrist-watch, approximate at best...
417 *
418 * What we know is the last time we received a frame over USB.
419 * Due to latency over USB that depends on the USB load, we don't
420 * know when this frame was received over IrDA (a few ms before ?)
421 * Then, same story for our outgoing frame...
422 *
423 * In theory, the USB dongle is supposed to handle the turnaround
424 * by itself (spec 1.0, chapter 4, page 6). Who knows ??? That's
425 * why this code is enabled only for dongles that don't meet
426 * the spec.
427 * Jean II */
428 if (self->capability & IUC_NO_TURN) {
429 mtt = irda_get_mtt(skb);
430 if (mtt) {
431 int diff;
432 do_gettimeofday(&self->now);
433 diff = self->now.tv_usec - self->stamp.tv_usec;
434#ifdef IU_USB_MIN_RTT
435 /* Factor in USB delays -> Get rid of udelay() that
436 * would be lost in the noise - Jean II */
437 diff += IU_USB_MIN_RTT;
438#endif /* IU_USB_MIN_RTT */
439 /* If the usec counter did wraparound, the diff will
440 * go negative (tv_usec is a long), so we need to
441 * correct it by one second. Jean II */
442 if (diff < 0)
443 diff += 1000000;
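			/* Worked example of the correction above: with
			 * stamp.tv_usec = 999900 and now.tv_usec = 300,
			 * diff = -999600; adding 1000000 gives the 400 us
			 * actually elapsed (whole seconds are ignored). */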
444
445 /* Check if the mtt is larger than the time we have
446 * already used by all the protocol processing
447 */
448 if (mtt > diff) {
449 mtt -= diff;
450 if (mtt > 1000)
451 mdelay(mtt/1000);
452 else
453 udelay(mtt);
454 }
455 }
456 }
457
458 /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
459 if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
460 IRDA_WARNING("%s(), failed Tx URB\n", __FUNCTION__);
461 self->stats.tx_errors++;
462 /* Let USB recover : We will catch that in the watchdog */
463 /*netif_start_queue(netdev);*/
464 } else {
465 /* Increment packet stats */
466 self->stats.tx_packets++;
467 self->stats.tx_bytes += skb->len;
468
469 netdev->trans_start = jiffies;
470 }
471 spin_unlock_irqrestore(&self->lock, flags);
472
473 return 0;
474
475drop:
476 /* Drop silently the skb and exit */
477 dev_kfree_skb(skb);
478 spin_unlock_irqrestore(&self->lock, flags);
479 return err; /* Usually 1 */
480}
481
482/*------------------------------------------------------------------*/
483/*
484 * Note : this function will be called only for tx_urb...
485 */
486static void write_bulk_callback(struct urb *urb, struct pt_regs *regs)
487{
488 unsigned long flags;
489 struct sk_buff *skb = urb->context;
490 struct irda_usb_cb *self = ((struct irda_skb_cb *) skb->cb)->context;
491
492 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
493
494 /* We should always have a context */
495 IRDA_ASSERT(self != NULL, return;);
496 /* We should always be called for the Tx URB */
497 IRDA_ASSERT(urb == self->tx_urb, return;);
498
499 /* Free up the skb */
500 dev_kfree_skb_any(skb);
501 urb->context = NULL;
502
503 /* Check for timeout and other USB nasties */
504 if (urb->status != 0) {
505 /* I get a lot of -ECONNABORTED = -103 here - Jean II */
506 IRDA_DEBUG(0, "%s(), URB complete status %d, transfer_flags 0x%04X\n", __FUNCTION__, urb->status, urb->transfer_flags);
507
508 /* Don't do anything here, that might confuse the USB layer,
509 * and we could go in recursion and blow the kernel stack...
510 * Instead, we will wait for irda_usb_net_timeout(), the
511 * network layer watchdog, to fix the situation.
512 * Jean II */
513 /* A reset of the dongle might be welcomed here - Jean II */
514 return;
515 }
516
517 /* urb is now available */
518 //urb->status = 0; -> tested above
519
520 /* Make sure we read self->present properly */
521 spin_lock_irqsave(&self->lock, flags);
522
523 /* If the network is closed, stop everything */
524 if ((!self->netopen) || (!self->present)) {
525 IRDA_DEBUG(0, "%s(), Network is gone...\n", __FUNCTION__);
526 spin_unlock_irqrestore(&self->lock, flags);
527 return;
528 }
529
530 /* If changes to speed or xbofs are pending... */
531 if ((self->new_speed != -1) || (self->new_xbofs != -1)) {
532 if ((self->new_speed != self->speed) ||
533 (self->new_xbofs != self->xbofs)) {
534 /* We haven't changed speed yet (because of
535 * IUC_SPEED_BUG), so do it now - Jean II */
536 IRDA_DEBUG(1, "%s(), Changing speed now...\n", __FUNCTION__);
537 irda_usb_change_speed_xbofs(self);
538 } else {
539 /* New speed and xbofs are now committed in hardware */
540 self->new_speed = -1;
541 self->new_xbofs = -1;
542 /* Done, waiting for next packet */
543 netif_wake_queue(self->netdev);
544 }
545 } else {
546 /* Otherwise, allow the stack to send more packets */
547 netif_wake_queue(self->netdev);
548 }
549 spin_unlock_irqrestore(&self->lock, flags);
550}
551
552/*------------------------------------------------------------------*/
553/*
554 * Watchdog timer from the network layer.
555 * After a predetermined timeout, if we don't give confirmation that
556 * the packet has been sent (i.e. no call to netif_wake_queue()),
557 * the network layer will call this function.
558 * Note that the URBs we submit also have a timeout. When the URB timeout
559 * expires, the normal URB callback is called (write_bulk_callback()).
560 */
561static void irda_usb_net_timeout(struct net_device *netdev)
562{
563 unsigned long flags;
564 struct irda_usb_cb *self = netdev->priv;
565 struct urb *urb;
566 int done = 0; /* If we have made any progress */
567
568 IRDA_DEBUG(0, "%s(), Network layer thinks we timed out!\n", __FUNCTION__);
569 IRDA_ASSERT(self != NULL, return;);
570
571 /* Protect us from USB callbacks, net Tx and else. */
572 spin_lock_irqsave(&self->lock, flags);
573
574 /* self->present *MUST* be read under spinlock */
575 if (!self->present) {
576 IRDA_WARNING("%s(), device not present!\n", __FUNCTION__);
577 netif_stop_queue(netdev);
578 spin_unlock_irqrestore(&self->lock, flags);
579 return;
580 }
581
582 /* Check speed URB */
583 urb = self->speed_urb;
584 if (urb->status != 0) {
585 IRDA_DEBUG(0, "%s: Speed change timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
586
587 switch (urb->status) {
588 case -EINPROGRESS:
589 usb_unlink_urb(urb);
590 /* Note : above will *NOT* call netif_wake_queue()
591 * in completion handler, we will come back here.
592 * Jean II */
593 done = 1;
594 break;
595 case -ECONNABORTED: /* -103 */
596 case -ECONNRESET: /* -104 */
597 case -ETIMEDOUT: /* -110 */
598 case -ENOENT: /* -2 (urb unlinked by us) */
599 default: /* ??? - Play safe */
600 urb->status = 0;
601 netif_wake_queue(self->netdev);
602 done = 1;
603 break;
604 }
605 }
606
607 /* Check Tx URB */
608 urb = self->tx_urb;
609 if (urb->status != 0) {
610 struct sk_buff *skb = urb->context;
611
612 IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
613
614 /* Increase error count */
615 self->stats.tx_errors++;
616
617#ifdef IU_BUG_KICK_TIMEOUT
618 /* Can't be a bad idea to reset the speed ;-) - Jean II */
619 if(self->new_speed == -1)
620 self->new_speed = self->speed;
621 if(self->new_xbofs == -1)
622 self->new_xbofs = self->xbofs;
623 irda_usb_change_speed_xbofs(self);
624#endif /* IU_BUG_KICK_TIMEOUT */
625
626 switch (urb->status) {
627 case -EINPROGRESS:
628 usb_unlink_urb(urb);
629 /* Note : above will *NOT* call netif_wake_queue()
630 * in completion handler, because urb->status will
631 * be -ENOENT. We will fix that at the next watchdog,
632 * leaving more time to USB to recover...
633 * Also, we are in interrupt, so we need to have
634 * URB_ASYNC_UNLINK to work properly...
635 * Jean II */
636 done = 1;
637 break;
638 case -ECONNABORTED: /* -103 */
639 case -ECONNRESET: /* -104 */
640 case -ETIMEDOUT: /* -110 */
641 case -ENOENT: /* -2 (urb unlinked by us) */
642 default: /* ??? - Play safe */
643 if(skb != NULL) {
644 dev_kfree_skb_any(skb);
645 urb->context = NULL;
646 }
647 urb->status = 0;
648 netif_wake_queue(self->netdev);
649 done = 1;
650 break;
651 }
652 }
653 spin_unlock_irqrestore(&self->lock, flags);
654
655 /* Maybe we need a reset */
656 /* Note : Some drivers seem to use a usb_set_interface() when they
657 * need to reset the hardware. Hum...
658 */
659
660 /* if(done == 0) */
661}
662
663/************************* RECEIVE ROUTINES *************************/
664/*
665 * Receive packets from the USB layer stack and pass them to the IrDA stack.
666 * Try to work around USB failures...
667 */
668
669/*
670 * Note :
671 * Some of you may have noticed that most dongles have an interrupt in pipe
672 * that we don't use. Here is the little secret...
673 * When we hang a Rx URB on the bulk in pipe, it generates some USB traffic
674 * in every USB frame. This is unnecessary overhead.
675 * The interrupt in pipe will generate an event every time a packet is
676 * received. Reading an interrupt pipe adds minimal overhead, but has some
677 * latency (~1ms).
678 * If we are connected (speed != 9600), we want to minimise latency, so
679 * we just always hang the Rx URB and ignore the interrupt.
680 * If we are not connected (speed == 9600), there is usually no Rx traffic,
681 * and we want to minimise the USB overhead. In this case we should wait
682 * on the interrupt pipe and hang the Rx URB only when an interrupt is
683 * received.
684 * Jean II
685 */
686
687/*------------------------------------------------------------------*/
688/*
689 * Submit a Rx URB to the USB layer to handle reception of a frame
690 * Mostly called by the completion callback of the previous URB.
691 *
692 * Jean II
693 */
694static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struct urb *urb)
695{
696 struct irda_skb_cb *cb;
697 int ret;
698
699 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
700
701 /* This should never happen */
702 IRDA_ASSERT(skb != NULL, return;);
703 IRDA_ASSERT(urb != NULL, return;);
704
705 /* Save ourselves in the skb */
706 cb = (struct irda_skb_cb *) skb->cb;
707 cb->context = self;
708
709 /* Reinitialize URB */
710 usb_fill_bulk_urb(urb, self->usbdev,
711 usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
712 skb->data, skb->truesize,
713 irda_usb_receive, skb);
714 /* Note : unlink *must* be synchronous because of the code in
715 * irda_usb_net_close() -> free the skb - Jean II */
716 urb->status = 0;
717
718 /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
719 ret = usb_submit_urb(urb, GFP_ATOMIC);
720 if (ret) {
721 /* If this ever happens, we are in deep s***.
722 * Basically, the Rx path will stop... */
723 IRDA_WARNING("%s(), Failed to submit Rx URB %d\n",
724 __FUNCTION__, ret);
725 }
726}
727
728/*------------------------------------------------------------------*/
729/*
730 * Function irda_usb_receive(urb)
731 *
732 * Called by the USB subsystem when a frame has been received
733 *
734 */
735static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
736{
737 struct sk_buff *skb = (struct sk_buff *) urb->context;
738 struct irda_usb_cb *self;
739 struct irda_skb_cb *cb;
740 struct sk_buff *newskb;
741 struct sk_buff *dataskb;
742 int docopy;
743
744 IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
745
746 /* Find ourselves */
747 cb = (struct irda_skb_cb *) skb->cb;
748 IRDA_ASSERT(cb != NULL, return;);
749 self = (struct irda_usb_cb *) cb->context;
750 IRDA_ASSERT(self != NULL, return;);
751
752 /* If the network is closed or the device gone, stop everything */
753 if ((!self->netopen) || (!self->present)) {
754 IRDA_DEBUG(0, "%s(), Network is gone!\n", __FUNCTION__);
755 /* Don't re-submit the URB : will stall the Rx path */
756 return;
757 }
758
759 /* Check the status */
760 if (urb->status != 0) {
761 switch (urb->status) {
762 case -EILSEQ:
763 self->stats.rx_errors++;
764 self->stats.rx_crc_errors++;
765 break;
766 case -ECONNRESET: /* -104 */
767 IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags);
768 /* uhci_cleanup_unlink() is going to kill the Rx
769 * URB just after we return. No problem, at this
770 * point the URB will be idle ;-) - Jean II */
771 break;
772 default:
773 IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
774 break;
775 }
776 goto done;
777 }
778
779 /* Check for empty frames */
780 if (urb->actual_length <= USB_IRDA_HEADER) {
781 IRDA_WARNING("%s(), empty frame!\n", __FUNCTION__);
782 goto done;
783 }
784
785 /*
786 * Remember the time we received this frame, so we can
787 * reduce the min turn time a bit since we will know
788 * how much time we have used for protocol processing
789 */
790 do_gettimeofday(&self->stamp);
791
792 /* Check if we need to copy the data to a new skb or not.
793 * For most frames, we use ZeroCopy and pass the already
794 * allocated skb up the stack.
795 * If the frame is small, it is more efficient to copy it
796 * to save memory (copy will be fast anyway - that's
797 * called Rx-copy-break). Jean II */
798 docopy = (urb->actual_length < IRDA_RX_COPY_THRESHOLD);
799
800 /* Allocate a new skb */
801 newskb = dev_alloc_skb(docopy ? urb->actual_length : IRDA_SKB_MAX_MTU);
802 if (!newskb) {
803 self->stats.rx_dropped++;
804 /* We could deliver the current skb, but this would stall
805 * the Rx path. Better drop the packet... Jean II */
806 goto done;
807 }
808
809 /* Make sure the IP header gets aligned (IrDA header is 5 bytes) */
810 /* But IrDA-USB header is 1 byte. Jean II */
811 //skb_reserve(newskb, USB_IRDA_HEADER - 1);
812
813 if(docopy) {
814 /* Copy packet, so we can recycle the original */
815 memcpy(newskb->data, skb->data, urb->actual_length);
816 /* Deliver this new skb */
817 dataskb = newskb;
818 /* And hook the old skb to the URB
819 * Note : we don't need to "clean up" the old skb,
820 * as we never touched it. Jean II */
821 } else {
822 /* We are using ZeroCopy. Deliver old skb */
823 dataskb = skb;
824 /* And hook the new skb to the URB */
825 skb = newskb;
826 }
827
828 /* Set proper length on skb & remove USB-IrDA header */
829 skb_put(dataskb, urb->actual_length);
830 skb_pull(dataskb, USB_IRDA_HEADER);
831
832 /* Ask the networking layer to queue the packet for the IrDA stack */
833 dataskb->dev = self->netdev;
834 dataskb->mac.raw = dataskb->data;
835 dataskb->protocol = htons(ETH_P_IRDA);
836 netif_rx(dataskb);
837
838 /* Keep stats up to date */
839 self->stats.rx_bytes += dataskb->len;
840 self->stats.rx_packets++;
841 self->netdev->last_rx = jiffies;
842
843done:
844 /* Note : at this point, the URB we've just received (urb)
845 * is still referenced by the USB layer. For example, if we
846 * have received a -ECONNRESET, uhci_cleanup_unlink() will
847 * continue to process it (in fact, cleaning it up).
848 * If we were to submit this URB, disaster would ensue.
849 * Therefore, we submit our idle URB, and put this URB in our
850 * idle slot....
851 * Jean II */
852 /* Note : with this scheme, we could submit the idle URB before
853 * processing the Rx URB. Another time... Jean II */
854
855 /* Submit the idle URB to replace the URB we've just received */
856 irda_usb_submit(self, skb, self->idle_rx_urb);
857 /* Recycle Rx URB : Now, the idle URB is the present one */
858 urb->context = NULL;
859 self->idle_rx_urb = urb;
860}
861
862/*------------------------------------------------------------------*/
863/*
864 * Callback from IrDA layer. IrDA wants to know if we have
865 * started receiving anything.
866 */
867static int irda_usb_is_receiving(struct irda_usb_cb *self)
868{
869 /* Note : because of the way UHCI works, it's almost impossible
870 * to get this info. The controller DMAs directly to memory and
871 * signals only when the whole frame is finished. Knowing whether the
872 * first TD of the URB has been filled or not seems hard work...
873 *
874 * The other solution would be to use the "receiving" command
875 * on the default descriptor with a usb_control_msg(), but that
876 * would add USB traffic and would return result only in the
877 * next USB frame (~1ms).
878 *
879 * I've been told that current dongles send status info on their
880 * interrupt endpoint, and that's what the Windows driver uses
881 * to know this info. Unfortunately, this is not yet in the spec...
882 *
883 * Jean II
884 */
885
886 return 0; /* For now */
887}
888
889/********************** IRDA DEVICE CALLBACKS **********************/
890/*
891 * Main calls from the IrDA/Network subsystem.
892 * Mostly registering a new irda-usb device and removing it....
893 * We only deal with the IrDA side of the business, the USB side will
894 * be dealt with below...
895 */
896
897
898/*------------------------------------------------------------------*/
899/*
900 * Function irda_usb_net_open (dev)
901 *
902 * Network device is taken up. Usually this is done by "ifconfig irda0 up"
903 *
904 * Note : don't mess with self->netopen - Jean II
905 */
906static int irda_usb_net_open(struct net_device *netdev)
907{
908 struct irda_usb_cb *self;
909 char hwname[16];
910 int i;
911
912 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
913
914 IRDA_ASSERT(netdev != NULL, return -1;);
915 self = (struct irda_usb_cb *) netdev->priv;
916 IRDA_ASSERT(self != NULL, return -1;);
917
918 /* Can only open the device if it's there */
919 if(!self->present) {
920 IRDA_WARNING("%s(), device not present!\n", __FUNCTION__);
921 return -1;
922 }
923
924 /* Initialise default speed and xbofs value
925 * (IrLAP will change that soon) */
926 self->speed = -1;
927 self->xbofs = -1;
928 self->new_speed = -1;
929 self->new_xbofs = -1;
930
931 /* To do *before* submitting Rx urbs and starting net Tx queue
932 * Jean II */
933 self->netopen = 1;
934
935 /*
936 * Now that everything should be initialized properly,
937 * Open new IrLAP layer instance to take care of us...
938 * Note : will send immediately a speed change...
939 */
940 sprintf(hwname, "usb#%d", self->usbdev->devnum);
941 self->irlap = irlap_open(netdev, &self->qos, hwname);
942 IRDA_ASSERT(self->irlap != NULL, return -1;);
943
944 /* Allow IrLAP to send data to us */
945 netif_start_queue(netdev);
946
947 /* We submit all the Rx URBs except for one that we keep idle.
948 * The idle one needs to be initialised before submitting the others, because
949 * in some cases as soon as we submit the URBs the USB layer
950 * will trigger a dummy receive - Jean II */
951 self->idle_rx_urb = self->rx_urb[IU_MAX_ACTIVE_RX_URBS];
952 self->idle_rx_urb->context = NULL;
953
954 /* Now that we can pass data to IrLAP, allow the USB layer
955 * to send us some data... */
956 for (i = 0; i < IU_MAX_ACTIVE_RX_URBS; i++) {
957 struct sk_buff *skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
958 if (!skb) {
959 /* If this ever happens, we are in deep s***.
960 * Basically, we can't start the Rx path... */
961 IRDA_WARNING("%s(), Failed to allocate Rx skb\n",
962 __FUNCTION__);
963 return -1;
964 }
965 //skb_reserve(newskb, USB_IRDA_HEADER - 1);
966 irda_usb_submit(self, skb, self->rx_urb[i]);
967 }
968
969 /* Ready to play !!! */
970 return 0;
971}
972
973/*------------------------------------------------------------------*/
974/*
975 * Function irda_usb_net_close (self)
976 *
977 * Network device is taken down. Usually this is done by
978 * "ifconfig irda0 down"
979 */
980static int irda_usb_net_close(struct net_device *netdev)
981{
982 struct irda_usb_cb *self;
983 int i;
984
985 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
986
987 IRDA_ASSERT(netdev != NULL, return -1;);
988 self = (struct irda_usb_cb *) netdev->priv;
989 IRDA_ASSERT(self != NULL, return -1;);
990
991 /* Clear this flag *before* unlinking the urbs and *before*
992 * stopping the network Tx queue - Jean II */
993 self->netopen = 0;
994
995 /* Stop network Tx queue */
996 netif_stop_queue(netdev);
997
998 /* Deallocate all the Rx path buffers (URBs and skb) */
999 for (i = 0; i < IU_MAX_RX_URBS; i++) {
1000 struct urb *urb = self->rx_urb[i];
1001 struct sk_buff *skb = (struct sk_buff *) urb->context;
1002 /* Cancel the receive command */
1003 usb_kill_urb(urb);
1004 /* The skb is ours, free it */
1005 if(skb) {
1006 dev_kfree_skb(skb);
1007 urb->context = NULL;
1008 }
1009 }
1010 /* Cancel Tx and speed URB - need to be synchronous to avoid races */
1011 self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
1012 usb_kill_urb(self->tx_urb);
1013 self->speed_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
1014 usb_kill_urb(self->speed_urb);
1015
1016 /* Stop and remove instance of IrLAP */
1017 if (self->irlap)
1018 irlap_close(self->irlap);
1019 self->irlap = NULL;
1020
1021 return 0;
1022}
1023
1024/*------------------------------------------------------------------*/
1025/*
1026 * IOCTLs : Extra out-of-band network commands...
1027 */
1028static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1029{
1030 unsigned long flags;
1031 struct if_irda_req *irq = (struct if_irda_req *) rq;
1032 struct irda_usb_cb *self;
1033 int ret = 0;
1034
1035 IRDA_ASSERT(dev != NULL, return -1;);
1036 self = dev->priv;
1037 IRDA_ASSERT(self != NULL, return -1;);
1038
1039 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
1040
1041 switch (cmd) {
1042 case SIOCSBANDWIDTH: /* Set bandwidth */
1043 if (!capable(CAP_NET_ADMIN))
1044 return -EPERM;
1045 /* Protect us from USB callbacks, net watchdog and else. */
1046 spin_lock_irqsave(&self->lock, flags);
1047 /* Check if the device is still there */
1048 if(self->present) {
1049 /* Set the desired speed */
1050 self->new_speed = irq->ifr_baudrate;
1051 irda_usb_change_speed_xbofs(self);
1052 }
1053 spin_unlock_irqrestore(&self->lock, flags);
1054 break;
1055 case SIOCSMEDIABUSY: /* Set media busy */
1056 if (!capable(CAP_NET_ADMIN))
1057 return -EPERM;
1058 /* Check if the IrDA stack is still there */
1059 if(self->netopen)
1060 irda_device_set_media_busy(self->netdev, TRUE);
1061 break;
1062 case SIOCGRECEIVING: /* Check if we are receiving right now */
1063 irq->ifr_receiving = irda_usb_is_receiving(self);
1064 break;
1065 default:
1066 ret = -EOPNOTSUPP;
1067 }
1068
1069 return ret;
1070}
1071
1072/*------------------------------------------------------------------*/
1073/*
1074 * Get device stats (for /proc/net/dev and ifconfig)
1075 */
1076static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev)
1077{
1078 struct irda_usb_cb *self = dev->priv;
1079 return &self->stats;
1080}
1081
1082/********************* IRDA CONFIG SUBROUTINES *********************/
1083/*
1084 * Various subroutines dealing with IrDA and network stuff we use to
1085 * configure and initialise each irda-usb instance.
1086 * These functions are used below in the main calls of the driver...
1087 */
1088
1089/*------------------------------------------------------------------*/
1090/*
1091 * Set proper values in the IrDA QOS structure
1092 */
1093static inline void irda_usb_init_qos(struct irda_usb_cb *self)
1094{
1095 struct irda_class_desc *desc;
1096
1097 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
1098
1099 desc = self->irda_desc;
1100
1101 /* Initialize QoS for this device */
1102 irda_init_max_qos_capabilies(&self->qos);
1103
1104 /* See spec section 7.2 for meaning.
1105 * Values are little endian (as most USB stuff), the IrDA stack
1106 * uses them in native order (see parameters.c). - Jean II */
1107 self->qos.baud_rate.bits = le16_to_cpu(desc->wBaudRate);
1108 self->qos.min_turn_time.bits = desc->bmMinTurnaroundTime;
1109 self->qos.additional_bofs.bits = desc->bmAdditionalBOFs;
1110 self->qos.window_size.bits = desc->bmWindowSize;
1111 self->qos.data_size.bits = desc->bmDataSize;
1112
1113 IRDA_DEBUG(0, "%s(), dongle says speed=0x%X, size=0x%X, window=0x%X, bofs=0x%X, turn=0x%X\n",
1114 __FUNCTION__, self->qos.baud_rate.bits, self->qos.data_size.bits, self->qos.window_size.bits, self->qos.additional_bofs.bits, self->qos.min_turn_time.bits);
1115
1116 /* Don't always trust what the dongle tells us */
1117 if(self->capability & IUC_SIR_ONLY)
1118 self->qos.baud_rate.bits &= 0x00ff;
1119 if(self->capability & IUC_SMALL_PKT)
1120 self->qos.data_size.bits = 0x07;
1121 if(self->capability & IUC_NO_WINDOW)
1122 self->qos.window_size.bits = 0x01;
1123 if(self->capability & IUC_MAX_WINDOW)
1124 self->qos.window_size.bits = 0x7f;
1125 if(self->capability & IUC_MAX_XBOFS)
1126 self->qos.additional_bofs.bits = 0x01;
1127
1128#if 1
1129 /* Module parameter can override the min turn time */
1130 if (qos_mtt_bits)
1131 self->qos.min_turn_time.bits = qos_mtt_bits;
1132#endif
1133 /*
1134 * Note : most of those values apply only for the receive path,
1135 * the transmit path will be set differently - Jean II
1136 */
1137 irda_qos_bits_to_value(&self->qos);
1138}
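
/*
 * Usage sketch (hypothetical invocation; the module_param registration for
 * qos_mtt_bits is outside this excerpt): loading the driver with, say,
 * "modprobe irda-usb qos_mtt_bits=1" forces the min-turn-time bits instead
 * of trusting the dongle, 0x01 meaning "at least 10 ms" in the IrDA QoS
 * bit encoding also used by the SIR dongle drivers in this directory.
 */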
1139
1140/*------------------------------------------------------------------*/
1141/*
1142 * Initialise the network side of the irda-usb instance
1143 * Called when a new USB instance is registered in irda_usb_probe()
1144 */
1145static inline int irda_usb_open(struct irda_usb_cb *self)
1146{
1147 struct net_device *netdev = self->netdev;
1148
1149 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
1150
1151 irda_usb_init_qos(self);
1152
1153 /* Override the network functions we need to use */
1154 netdev->hard_start_xmit = irda_usb_hard_xmit;
1155 netdev->tx_timeout = irda_usb_net_timeout;
1156 netdev->watchdog_timeo = 250*HZ/1000; /* 250 ms > USB timeout */
1157 netdev->open = irda_usb_net_open;
1158 netdev->stop = irda_usb_net_close;
1159 netdev->get_stats = irda_usb_net_get_stats;
1160 netdev->do_ioctl = irda_usb_net_ioctl;
1161
1162 return register_netdev(netdev);
1163}
1164
1165/*------------------------------------------------------------------*/
1166/*
1167 * Cleanup the network side of the irda-usb instance
1168 * Called when a USB instance is removed in irda_usb_disconnect()
1169 */
1170static inline void irda_usb_close(struct irda_usb_cb *self)
1171{
1172 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
1173
1174 /* Remove netdevice */
1175 unregister_netdev(self->netdev);
1176
1177 /* Remove the speed buffer */
1178 if (self->speed_buff != NULL) {
1179 kfree(self->speed_buff);
1180 self->speed_buff = NULL;
1181 }
1182}
1183
1184/********************** USB CONFIG SUBROUTINES **********************/
1185/*
1186 * Various subroutines dealing with USB stuff we use to configure and
1187 * initialise each irda-usb instance.
1188 * These functions are used below in the main calls of the driver...
1189 */
1190
1191/*------------------------------------------------------------------*/
1192/*
1193 * Function irda_usb_parse_endpoints(dev, ifnum)
1194 *
1195 * Parse the various endpoints and find the one we need.
1196 *
1197 * The endpoints are the pipes used to communicate with the USB device.
1198 * The spec defines 2 endpoints of type bulk transfer, one in, and one out.
1199 * These are used to pass frames back and forth with the dongle.
1200 * Most dongles also have an interrupt endpoint, which will probably be
1201 * documented in the next spec...
1202 */
1203static inline int irda_usb_parse_endpoints(struct irda_usb_cb *self, struct usb_host_endpoint *endpoint, int ennum)
1204{
1205 int i; /* Endpoint index in table */
1206
1207 /* Init : no endpoints */
1208 self->bulk_in_ep = 0;
1209 self->bulk_out_ep = 0;
1210 self->bulk_int_ep = 0;
1211
1212 /* Let's look at all those endpoints */
1213 for(i = 0; i < ennum; i++) {
1214 /* All those variables will get optimised by the compiler,
1215 * so let's aim for clarity... - Jean II */
1216 __u8 ep; /* Endpoint address */
1217 __u8 dir; /* Endpoint direction */
1218 __u8 attr; /* Endpoint attribute */
1219 __u16 psize; /* Endpoint max packet size in bytes */
1220
1221 /* Get endpoint address, direction and attribute */
1222 ep = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
1223 dir = endpoint[i].desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK;
1224 attr = endpoint[i].desc.bmAttributes;
1225 psize = le16_to_cpu(endpoint[i].desc.wMaxPacketSize);
1226
1227 /* Is it a bulk endpoint ??? */
1228 if(attr == USB_ENDPOINT_XFER_BULK) {
1229 /* We need to find an IN and an OUT */
1230 if(dir == USB_DIR_IN) {
1231 /* This is our Rx endpoint */
1232 self->bulk_in_ep = ep;
1233 } else {
1234 /* This is our Tx endpoint */
1235 self->bulk_out_ep = ep;
1236 self->bulk_out_mtu = psize;
1237 }
1238 } else {
1239 if((attr == USB_ENDPOINT_XFER_INT) &&
1240 (dir == USB_DIR_IN)) {
1241 /* This is our interrupt endpoint */
1242 self->bulk_int_ep = ep;
1243 } else {
1244 IRDA_ERROR("%s(), Unrecognised endpoint %02X.\n", __FUNCTION__, ep);
1245 }
1246 }
1247 }
1248
1249 IRDA_DEBUG(0, "%s(), And our endpoints are : in=%02X, out=%02X (%d), int=%02X\n",
1250 __FUNCTION__, self->bulk_in_ep, self->bulk_out_ep, self->bulk_out_mtu, self->bulk_int_ep);
1251 /* Should be 8, 16, 32 or 64 bytes */
1252 IRDA_ASSERT(self->bulk_out_mtu == 64, ;);
1253
1254 return((self->bulk_in_ep != 0) && (self->bulk_out_ep != 0));
1255}
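
For readers less familiar with the ch9 descriptor fields tested above, here is a minimal sketch (not part of the driver) of how one endpoint descriptor is classified as a bulk-IN endpoint; all macros are the standard ones from <linux/usb.h>:

#include <linux/usb.h>

/* Sketch only: classify one endpoint descriptor the same way
 * irda_usb_parse_endpoints() does above (with bmAttributes masked
 * down to its transfer-type bits). */
static int example_is_bulk_in(const struct usb_endpoint_descriptor *desc)
{
	__u8 dir  = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
	__u8 attr = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return (attr == USB_ENDPOINT_XFER_BULK) && (dir == USB_DIR_IN);
}
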
1256
1257#ifdef IU_DUMP_CLASS_DESC
1258/*------------------------------------------------------------------*/
1259/*
1260 * Function irda_usb_dump_class_desc(desc)
1261 *
1262 * Prints out the contents of the IrDA class descriptor
1263 *
1264 */
1265static inline void irda_usb_dump_class_desc(struct irda_class_desc *desc)
1266{
1267 /* Values are little endian */
1268 printk("bLength=%x\n", desc->bLength);
1269 printk("bDescriptorType=%x\n", desc->bDescriptorType);
1270 printk("bcdSpecRevision=%x\n", le16_to_cpu(desc->bcdSpecRevision));
1271 printk("bmDataSize=%x\n", desc->bmDataSize);
1272 printk("bmWindowSize=%x\n", desc->bmWindowSize);
1273 printk("bmMinTurnaroundTime=%d\n", desc->bmMinTurnaroundTime);
1274 printk("wBaudRate=%x\n", le16_to_cpu(desc->wBaudRate));
1275 printk("bmAdditionalBOFs=%x\n", desc->bmAdditionalBOFs);
1276 printk("bIrdaRateSniff=%x\n", desc->bIrdaRateSniff);
1277 printk("bMaxUnicastList=%x\n", desc->bMaxUnicastList);
1278}
1279#endif /* IU_DUMP_CLASS_DESC */
1280
1281/*------------------------------------------------------------------*/
1282/*
1283 * Function irda_usb_find_class_desc(intf)
1284 *
1285 * Returns instance of IrDA class descriptor, or NULL if not found
1286 *
1287 * The class descriptor is some extra info that IrDA USB devices will
1288 * offer to us, describing their IrDA characteristics. We will use that in
1289 * irda_usb_init_qos()
1290 */
1291static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interface *intf)
1292{
1293 struct usb_device *dev = interface_to_usbdev (intf);
1294 struct irda_class_desc *desc;
1295 int ret;
1296
1297 desc = kmalloc(sizeof (*desc), GFP_KERNEL);
1298 if (desc == NULL)
1299 return NULL;
1300 memset(desc, 0, sizeof(*desc));
1301
1302 /* USB-IrDA class spec 1.0:
1303 * 6.1.3: Standard "Get Descriptor" Device Request is not
1304 * appropriate to retrieve class-specific descriptor
1305 * 6.2.5: Class Specific "Get Class Descriptor" Interface Request
1306 * is mandatory and returns the USB-IrDA class descriptor
1307 */
1308
1309 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev,0),
1310 IU_REQ_GET_CLASS_DESC,
1311 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
1312 0, intf->altsetting->desc.bInterfaceNumber, desc,
1313 sizeof(*desc), 500);
1314
1315 IRDA_DEBUG(1, "%s(), ret=%d\n", __FUNCTION__, ret);
1316 if (ret < sizeof(*desc)) {
1317 IRDA_WARNING("usb-irda: class_descriptor read %s (%d)\n",
1318 (ret<0) ? "failed" : "too short", ret);
1319 }
1320 else if (desc->bDescriptorType != USB_DT_IRDA) {
1321 IRDA_WARNING("usb-irda: bad class_descriptor type\n");
1322 }
1323 else {
1324#ifdef IU_DUMP_CLASS_DESC
1325 irda_usb_dump_class_desc(desc);
1326#endif /* IU_DUMP_CLASS_DESC */
1327
1328 return desc;
1329 }
1330 kfree(desc);
1331 return NULL;
1332}
1333
1334/*********************** USB DEVICE CALLBACKS ***********************/
1335/*
1336 * Main calls from the USB subsystem.
1337 * Mostly registering a new irda-usb device and removing it....
1338 */
1339
1340/*------------------------------------------------------------------*/
1341/*
1342 * This routine is called by the USB subsystem for each new device
1343 * in the system. We need to check if the device is ours, and if
1344 * so start handling it.
1345 * The USB layer protects us from reentrancy (via the BKL), so we don't
1346 * need to use a spinlock in there... Jean II
1347 */
1348static int irda_usb_probe(struct usb_interface *intf,
1349 const struct usb_device_id *id)
1350{
1351 struct net_device *net;
1352 struct usb_device *dev = interface_to_usbdev(intf);
1353 struct irda_usb_cb *self = NULL;
1354 struct usb_host_interface *interface;
1355 struct irda_class_desc *irda_desc;
1356 int ret = -ENOMEM;
1357 int i; /* Driver instance index / Rx URB index */
1358
1359	/* Note : the probe makes sure to call us only for devices that
1360	 * match the list of dongles (top of the file). So, we
1361	 * don't need to check if the dongle is really ours.
1362	 * Jean II */
1363
1364 IRDA_MESSAGE("IRDA-USB found at address %d, Vendor: %x, Product: %x\n",
1365 dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
1366 le16_to_cpu(dev->descriptor.idProduct));
1367
1368 net = alloc_irdadev(sizeof(*self));
1369 if (!net)
1370 goto err_out;
1371
1372 SET_MODULE_OWNER(net);
1373 SET_NETDEV_DEV(net, &intf->dev);
1374 self = net->priv;
1375 self->netdev = net;
1376 spin_lock_init(&self->lock);
1377
1378 /* Create all of the needed urbs */
1379 for (i = 0; i < IU_MAX_RX_URBS; i++) {
1380 self->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
1381 if (!self->rx_urb[i]) {
1382 goto err_out_1;
1383 }
1384 }
1385 self->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
1386 if (!self->tx_urb) {
1387 goto err_out_1;
1388 }
1389 self->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
1390 if (!self->speed_urb) {
1391 goto err_out_2;
1392 }
1393
1394 /* Is this really necessary? (no, except maybe for broken devices) */
1395 if (usb_reset_configuration (dev) < 0) {
1396 err("reset_configuration failed");
1397 ret = -EIO;
1398 goto err_out_3;
1399 }
1400
1401 /* Is this really necessary? */
1402	/* Note : some drivers hardcode the interface number, some others
1403	 * specify an alternate setting, but very few drivers do it like this.
1404	 * Jean II */
1405 ret = usb_set_interface(dev, intf->altsetting->desc.bInterfaceNumber, 0);
1406 IRDA_DEBUG(1, "usb-irda: set interface %d result %d\n", intf->altsetting->desc.bInterfaceNumber, ret);
1407 switch (ret) {
1408 case 0:
1409 break;
1410 case -EPIPE: /* -EPIPE = -32 */
1411 /* Martin Diehl says if we get a -EPIPE we should
1412 * be fine and we don't need to do a usb_clear_halt().
1413 * - Jean II */
1414 IRDA_DEBUG(0, "%s(), Received -EPIPE, ignoring...\n", __FUNCTION__);
1415 break;
1416 default:
1417 IRDA_DEBUG(0, "%s(), Unknown error %d\n", __FUNCTION__, ret);
1418 ret = -EIO;
1419 goto err_out_3;
1420 }
1421
1422 /* Find our endpoints */
1423 interface = intf->cur_altsetting;
1424 if(!irda_usb_parse_endpoints(self, interface->endpoint,
1425 interface->desc.bNumEndpoints)) {
1426 IRDA_ERROR("%s(), Bogus endpoints...\n", __FUNCTION__);
1427 ret = -EIO;
1428 goto err_out_3;
1429 }
1430
1431 /* Find IrDA class descriptor */
1432 irda_desc = irda_usb_find_class_desc(intf);
1433 ret = -ENODEV;
1434 if (irda_desc == NULL)
1435 goto err_out_3;
1436
1437 self->irda_desc = irda_desc;
1438 self->present = 1;
1439 self->netopen = 0;
1440 self->capability = id->driver_info;
1441 self->usbdev = dev;
1442 self->usbintf = intf;
1443
1444 /* Allocate the buffer for speed changes */
1445 /* Don't change this buffer size and allocation without doing
1446 * some heavy and complete testing. Don't ask why :-(
1447 * Jean II */
1448 self->speed_buff = (char *) kmalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL);
1449 if (self->speed_buff == NULL)
1450 goto err_out_3;
1451
1452 memset(self->speed_buff, 0, IRDA_USB_SPEED_MTU);
1453
1454 ret = irda_usb_open(self);
1455 if (ret)
1456 goto err_out_4;
1457
1458 IRDA_MESSAGE("IrDA: Registered device %s\n", net->name);
1459 usb_set_intfdata(intf, self);
1460 return 0;
1461
1462err_out_4:
1463 kfree(self->speed_buff);
1464err_out_3:
1465 /* Free all urbs that we may have created */
1466 usb_free_urb(self->speed_urb);
1467err_out_2:
1468 usb_free_urb(self->tx_urb);
1469err_out_1:
1470 for (i = 0; i < IU_MAX_RX_URBS; i++) {
1471 if (self->rx_urb[i])
1472 usb_free_urb(self->rx_urb[i]);
1473 }
1474 free_netdev(net);
1475err_out:
1476 return ret;
1477}
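
The error paths above use the usual goto-unwind idiom, releasing resources in the reverse order they were acquired. A stripped-down sketch of the pattern (generic, not driver code):

#include <linux/slab.h>
#include <linux/errno.h>

/* Generic goto-unwind sketch, illustrative only. */
static int example_probe_unwind(void)
{
	void *a, *b;

	a = kmalloc(16, GFP_KERNEL);
	if (!a)
		goto err_out;
	b = kmalloc(16, GFP_KERNEL);
	if (!b)
		goto err_free_a;
	/* ... use a and b, then release them normally ... */
	kfree(b);
	kfree(a);
	return 0;

err_free_a:
	kfree(a);
err_out:
	return -ENOMEM;
}
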
1478
1479/*------------------------------------------------------------------*/
1480/*
1481 * The current irda-usb device is removed, the USB layer tells us
1482 * to shut it down...
1483 * One of the constraints is that when we exit this function,
1484 * we can no longer use the usb_device. Gone. Destroyed. kfree().
1485 * Most other subsystems allow you to destroy the instance at a time
1486 * that's convenient to you, postponing it to a later date, but
1487 * not the USB subsystem.
1488 * So, we must make bloody sure that everything gets deactivated.
1489 * Jean II
1490 */
1491static void irda_usb_disconnect(struct usb_interface *intf)
1492{
1493 unsigned long flags;
1494 struct irda_usb_cb *self = usb_get_intfdata(intf);
1495 int i;
1496
1497 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
1498
1499 usb_set_intfdata(intf, NULL);
1500 if (!self)
1501 return;
1502
1503 /* Make sure that the Tx path is not executing. - Jean II */
1504 spin_lock_irqsave(&self->lock, flags);
1505
1506	/* Oops! We are not there any more.
1507	 * This will stop/deactivate the Tx path. - Jean II */
1508 self->present = 0;
1509
1510 /* We need to have irq enabled to unlink the URBs. That's OK,
1511 * at this point the Tx path is gone - Jean II */
1512 spin_unlock_irqrestore(&self->lock, flags);
1513
1514 /* Hum... Check if networking is still active (avoid races) */
1515 if((self->netopen) || (self->irlap)) {
1516 /* Accept no more transmissions */
1517 /*netif_device_detach(self->netdev);*/
1518 netif_stop_queue(self->netdev);
1519 /* Stop all the receive URBs */
1520 for (i = 0; i < IU_MAX_RX_URBS; i++)
1521 usb_kill_urb(self->rx_urb[i]);
1522 /* Cancel Tx and speed URB.
1523 * Toggle flags to make sure it's synchronous. */
1524 self->tx_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
1525 usb_kill_urb(self->tx_urb);
1526 self->speed_urb->transfer_flags &= ~URB_ASYNC_UNLINK;
1527 usb_kill_urb(self->speed_urb);
1528 }
1529
1530 /* Cleanup the device stuff */
1531 irda_usb_close(self);
1532 /* No longer attached to USB bus */
1533 self->usbdev = NULL;
1534 self->usbintf = NULL;
1535
1536 /* Clean up our urbs */
1537 for (i = 0; i < IU_MAX_RX_URBS; i++)
1538 usb_free_urb(self->rx_urb[i]);
1539 /* Clean up Tx and speed URB */
1540 usb_free_urb(self->tx_urb);
1541 usb_free_urb(self->speed_urb);
1542
1543 /* Free self and network device */
1544 free_netdev(self->netdev);
1545 IRDA_DEBUG(0, "%s(), USB IrDA Disconnected\n", __FUNCTION__);
1546}
1547
1548/*------------------------------------------------------------------*/
1549/*
1550 * USB device callbacks
1551 */
1552static struct usb_driver irda_driver = {
1553 .owner = THIS_MODULE,
1554 .name = "irda-usb",
1555 .probe = irda_usb_probe,
1556 .disconnect = irda_usb_disconnect,
1557 .id_table = dongles,
1558};
1559
1560/************************* MODULE CALLBACKS *************************/
1561/*
1562 * Deal with module insertion/removal
1563 * Mostly tell USB about our existence
1564 */
1565
1566/*------------------------------------------------------------------*/
1567/*
1568 * Module insertion
1569 */
1570static int __init usb_irda_init(void)
1571{
1572 int ret;
1573
1574 ret = usb_register(&irda_driver);
1575 if (ret < 0)
1576 return ret;
1577
1578 IRDA_MESSAGE("USB IrDA support registered\n");
1579 return 0;
1580}
1581module_init(usb_irda_init);
1582
1583/*------------------------------------------------------------------*/
1584/*
1585 * Module removal
1586 */
1587static void __exit usb_irda_cleanup(void)
1588{
1589 /* Deregister the driver and remove all pending instances */
1590 usb_deregister(&irda_driver);
1591}
1592module_exit(usb_irda_cleanup);
1593
1594/*------------------------------------------------------------------*/
1595/*
1596 * Module parameters
1597 */
1598module_param(qos_mtt_bits, int, 0);
1599MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
1600MODULE_AUTHOR("Roman Weissgaerber <weissg@vienna.at>, Dag Brattli <dag@brattli.net> and Jean Tourrilhes <jt@hpl.hp.com>");
1601MODULE_DESCRIPTION("IrDA-USB Dongle Driver");
1602MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h
new file mode 100644
index 000000000000..bd8f66542322
--- /dev/null
+++ b/drivers/net/irda/irda-usb.h
@@ -0,0 +1,163 @@
1/*****************************************************************************
2 *
3 * Filename: irda-usb.h
4 * Version: 0.9b
5 * Description: IrDA-USB Driver
6 * Status: Experimental
7 * Author: Dag Brattli <dag@brattli.net>
8 *
9 * Copyright (C) 2001, Roman Weissgaerber <weissg@vienna.at>
10 * Copyright (C) 2000, Dag Brattli <dag@brattli.net>
11 * Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 *****************************************************************************/
28
29#include <linux/time.h>
30
31#include <net/irda/irda.h>
32#include <net/irda/irda_device.h> /* struct irlap_cb */
33
34#define RX_COPY_THRESHOLD 200
35#define IRDA_USB_MAX_MTU 2051
36#define IRDA_USB_SPEED_MTU 64 /* Weird, but works like this */
37
38/* Maximum number of active URBs on the Rx path
39 * This is the number of buffers we keep between the USB hardware and the
40 * IrDA stack.
41 *
42 * Note : the network layer also queues the packets between us and the
43 * IrDA stack, and is actually pretty fast and efficient in doing that.
44 * Therefore, we don't need to have a large number of URBs, and we can
45 * be perfectly happy with only one. We certainly don't need to keep the
46 * full IrTTP window around here...
47 * I repeat for those who have trouble understanding : 1 URB is plenty
48 * good enough to handle back-to-back (brickwalled) frames. I tried it,
49 * it works (it's the hardware that has trouble doing it).
50 *
51 * Having 2 URBs would allow the USB stack to process one URB while we take
52 * care of the other and then swap the URBs...
53 * On the other hand, increasing the number of URBs will have penalties
54 * in terms of latency and will interact with the link management in IrLAP...
55 * Jean II */
56#define IU_MAX_ACTIVE_RX_URBS 1 /* Don't touch !!! */
57
58/* When a Rx URB is passed back to us, we can't reuse it immediately,
59 * because it may still be referenced by the USB layer. Therefore we
60 * need to keep one extra URB in the Rx path.
61 * Jean II */
62#define IU_MAX_RX_URBS (IU_MAX_ACTIVE_RX_URBS + 1)
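
A conceptual sketch of the "one active plus one spare" scheme described above; this is not the driver's actual Rx completion handler (which is not shown in this part of the file), just the rotation idea:

struct urb;

/* Conceptual sketch only: when a receive URB completes, resubmit the
 * spare immediately and keep the completed one as the new spare while
 * its data is handed up to the IrDA stack. */
struct example_rx_ring {
	struct urb *active;	/* owned by the USB core right now */
	struct urb *spare;	/* free for immediate resubmission */
};

static void example_rx_rotate(struct example_rx_ring *ring,
			      struct urb *completed)
{
	ring->active = ring->spare;
	ring->spare  = completed;
}
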
63
64/* Various ugly stuff to try to work around generic problems */
65/* Send speed command in case of timeout, just for trying to get things sane */
66#define IU_BUG_KICK_TIMEOUT
67/* Show the USB class descriptor */
68#undef IU_DUMP_CLASS_DESC
69/* Assume a minimum round-trip latency for USB transfers (in us)...
70 * USB transfers are done in the next USB slot if there is no traffic
71 * (1/19 msec) and run at 12 Mb/s :
72 * Waiting for slot + tx = (53us + 16us) * 2 = 137us minimum.
73 * Rx notification will only be done at the end of the USB frame period :
74 * OHCI : frame period = 1ms
75 * UHCI : frame period = 1ms, but notification can take 2 or 3 ms :-(
76 * EHCI : frame period = 125us */
77#define IU_USB_MIN_RTT 500 /* This should be safe in most cases */
78
79/* Inbound header */
80#define MEDIA_BUSY 0x80
81
82#define SPEED_2400 0x01
83#define SPEED_9600 0x02
84#define SPEED_19200 0x03
85#define SPEED_38400 0x04
86#define SPEED_57600 0x05
87#define SPEED_115200 0x06
88#define SPEED_576000 0x07
89#define SPEED_1152000 0x08
90#define SPEED_4000000 0x09
91
92/* Basic capabilities */
93#define IUC_DEFAULT 0x00 /* Basic device compliant with 1.0 spec */
94/* Main bugs */
95#define IUC_SPEED_BUG 0x01 /* Device doesn't set speed after the frame */
96#define IUC_NO_WINDOW 0x02 /* Device doesn't behave with big Rx window */
97#define IUC_NO_TURN 0x04 /* Device doesn't do turnaround by itself */
98/* Not currently used */
99#define IUC_SIR_ONLY 0x08 /* Device doesn't behave at FIR speeds */
100#define IUC_SMALL_PKT 0x10 /* Device doesn't behave with big Rx packets */
101#define IUC_MAX_WINDOW 0x20 /* Device underestimates the Rx window */
102#define IUC_MAX_XBOFS 0x40 /* Device needs more xbofs than advertised */
103
104/* USB class definitions */
105#define USB_IRDA_HEADER 0x01
106#define USB_CLASS_IRDA 0x02 /* USB_CLASS_APP_SPEC subclass */
107#define USB_DT_IRDA 0x21
108
109struct irda_class_desc {
110 __u8 bLength;
111 __u8 bDescriptorType;
112 __u16 bcdSpecRevision;
113 __u8 bmDataSize;
114 __u8 bmWindowSize;
115 __u8 bmMinTurnaroundTime;
116 __u16 wBaudRate;
117 __u8 bmAdditionalBOFs;
118 __u8 bIrdaRateSniff;
119 __u8 bMaxUnicastList;
120} __attribute__ ((packed));
121
122/* class specific interface request to get the IrDA-USB class descriptor
123 * (6.2.5, USB-IrDA class spec 1.0) */
124
125#define IU_REQ_GET_CLASS_DESC 0x06
126
127struct irda_usb_cb {
128 struct irda_class_desc *irda_desc;
129 struct usb_device *usbdev; /* init: probe_irda */
130 struct usb_interface *usbintf; /* init: probe_irda */
131 int netopen; /* Device is active for network */
132 int present; /* Device is present on the bus */
133 __u32 capability; /* Capability of the hardware */
134 __u8 bulk_in_ep; /* Rx Endpoint assignments */
135 __u8 bulk_out_ep; /* Tx Endpoint assignments */
136 __u16 bulk_out_mtu; /* Max Tx packet size in bytes */
137 __u8 bulk_int_ep; /* Interrupt Endpoint assignments */
138
139 wait_queue_head_t wait_q; /* for timeouts */
140
141 struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */
142 struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
143 struct urb *tx_urb; /* URB used to send data frames */
144 struct urb *speed_urb; /* URB used to send speed commands */
145
146 struct net_device *netdev; /* Yes! we are some kind of netdev. */
147 struct net_device_stats stats;
148 struct irlap_cb *irlap; /* The link layer we are bound to */
149 struct qos_info qos;
150 hashbin_t *tx_list; /* Queued transmit skb's */
151 char *speed_buff; /* Buffer for speed changes */
152
153 struct timeval stamp;
154 struct timeval now;
155
156 spinlock_t lock; /* For serializing operations */
157
158 __u16 xbofs; /* Current xbofs setting */
159 __s16 new_xbofs; /* xbofs we need to set */
160 __u32 speed; /* Current speed */
161 __s32 new_speed; /* speed we need to set */
162};
163
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
new file mode 100644
index 000000000000..5971315f3fa0
--- /dev/null
+++ b/drivers/net/irda/irport.c
@@ -0,0 +1,1146 @@
1/*********************************************************************
2 *
3 * Filename: irport.c
4 * Version: 1.0
5 * Description: Half duplex serial port SIR driver for IrDA.
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sun Aug 3 13:49:59 1997
9 * Modified at: Fri Jan 28 20:22:38 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * Sources: serial.c by Linus Torvalds
12 *
13 * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved.
14 * Copyright (c) 2000-2003 Jean Tourrilhes, All Rights Reserved.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
29 * MA 02111-1307 USA
30 *
31 * This driver is meant to be a small half duplex serial driver to be
32 * used for IR chipsets that have a UART (16550) compatibility mode.
33 * Eventually it will replace irtty, because irtty has some
34 * problems that are hard to get around when we don't have control
35 * over the serial driver. This driver may also be used by FIR
36 * drivers to handle SIR mode for them.
37 *
38 ********************************************************************/
39
40#include <linux/module.h>
41
42#include <linux/kernel.h>
43#include <linux/types.h>
44#include <linux/ioport.h>
45#include <linux/slab.h>
46#include <linux/string.h>
47#include <linux/skbuff.h>
48#include <linux/serial_reg.h>
49#include <linux/errno.h>
50#include <linux/init.h>
51#include <linux/spinlock.h>
52#include <linux/delay.h>
53#include <linux/rtnetlink.h>
54#include <linux/bitops.h>
55
56#include <asm/system.h>
57#include <asm/io.h>
58
59#include <net/irda/irda.h>
60#include <net/irda/wrapper.h>
61#include "irport.h"
62
63#define IO_EXTENT 8
64
65/*
66 * Currently you'll need to set these values using insmod like this:
67 * insmod irport io=0x3e8 irq=11
68 */
69static unsigned int io[] = { ~0, ~0, ~0, ~0 };
70static unsigned int irq[] = { 0, 0, 0, 0 };
71
72static unsigned int qos_mtt_bits = 0x03;
73
74static struct irport_cb *dev_self[] = { NULL, NULL, NULL, NULL};
75static char *driver_name = "irport";
76
77static inline void irport_write_wakeup(struct irport_cb *self);
78static inline int irport_write(int iobase, int fifo_size, __u8 *buf, int len);
79static inline void irport_receive(struct irport_cb *self);
80
81static int irport_net_ioctl(struct net_device *dev, struct ifreq *rq,
82 int cmd);
83static inline int irport_is_receiving(struct irport_cb *self);
84static int irport_set_dtr_rts(struct net_device *dev, int dtr, int rts);
85static int irport_raw_write(struct net_device *dev, __u8 *buf, int len);
86static struct net_device_stats *irport_net_get_stats(struct net_device *dev);
87static int irport_change_speed_complete(struct irda_task *task);
88static void irport_timeout(struct net_device *dev);
89
90static irqreturn_t irport_interrupt(int irq, void *dev_id,
91 struct pt_regs *regs);
92static int irport_hard_xmit(struct sk_buff *skb, struct net_device *dev);
93static void irport_change_speed(void *priv, __u32 speed);
94static int irport_net_open(struct net_device *dev);
95static int irport_net_close(struct net_device *dev);
96
97static struct irport_cb *
98irport_open(int i, unsigned int iobase, unsigned int irq)
99{
100 struct net_device *dev;
101 struct irport_cb *self;
102
103 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
104
105 /* Lock the port that we need */
106 if (!request_region(iobase, IO_EXTENT, driver_name)) {
107 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
108 __FUNCTION__, iobase);
109 goto err_out1;
110 }
111
112 /*
113 * Allocate new instance of the driver
114 */
115 dev = alloc_irdadev(sizeof(struct irport_cb));
116 if (!dev) {
117 IRDA_ERROR("%s(), can't allocate memory for "
118 "irda device!\n", __FUNCTION__);
119 goto err_out2;
120 }
121
122 self = dev->priv;
123 spin_lock_init(&self->lock);
124
125 /* Need to store self somewhere */
126 dev_self[i] = self;
127 self->priv = self;
128 self->index = i;
129
130 /* Initialize IO */
131 self->io.sir_base = iobase;
132 self->io.sir_ext = IO_EXTENT;
133 self->io.irq = irq;
134 self->io.fifo_size = 16; /* 16550A and compatible */
135
136 /* Initialize QoS for this device */
137 irda_init_max_qos_capabilies(&self->qos);
138
139 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
140 IR_115200;
141
142 self->qos.min_turn_time.bits = qos_mtt_bits;
143 irda_qos_bits_to_value(&self->qos);
144
145 /* Bootstrap ZeroCopy Rx */
146 self->rx_buff.truesize = IRDA_SKB_MAX_MTU;
147 self->rx_buff.skb = __dev_alloc_skb(self->rx_buff.truesize,
148 GFP_KERNEL);
149 if (self->rx_buff.skb == NULL) {
150 IRDA_ERROR("%s(), can't allocate memory for "
151 "receive buffer!\n", __FUNCTION__);
152 goto err_out3;
153 }
154 skb_reserve(self->rx_buff.skb, 1);
155 self->rx_buff.head = self->rx_buff.skb->data;
156 /* No need to memset the buffer, unless you are really pedantic */
157
158	/* Finish setting up the Rx buffer descriptor */
159 self->rx_buff.in_frame = FALSE;
160 self->rx_buff.state = OUTSIDE_FRAME;
161 self->rx_buff.data = self->rx_buff.head;
162
163 /* Specify how much memory we want */
164 self->tx_buff.truesize = 4000;
165
166 /* Allocate memory if needed */
167 if (self->tx_buff.truesize > 0) {
168 self->tx_buff.head = (__u8 *) kmalloc(self->tx_buff.truesize,
169 GFP_KERNEL);
170 if (self->tx_buff.head == NULL) {
171 IRDA_ERROR("%s(), can't allocate memory for "
172 "transmit buffer!\n", __FUNCTION__);
173 goto err_out4;
174 }
175 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
176 }
177 self->tx_buff.data = self->tx_buff.head;
178
179 self->netdev = dev;
180 /* Keep track of module usage */
181 SET_MODULE_OWNER(dev);
182
183 /* May be overridden by piggyback drivers */
184 self->interrupt = irport_interrupt;
185 self->change_speed = irport_change_speed;
186
187 /* Override the network functions we need to use */
188 dev->hard_start_xmit = irport_hard_xmit;
189 dev->tx_timeout = irport_timeout;
190 dev->watchdog_timeo = HZ; /* Allow time enough for speed change */
191 dev->open = irport_net_open;
192 dev->stop = irport_net_close;
193 dev->get_stats = irport_net_get_stats;
194 dev->do_ioctl = irport_net_ioctl;
195
196 /* Make ifconfig display some details */
197 dev->base_addr = iobase;
198 dev->irq = irq;
199
200 if (register_netdev(dev)) {
201 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
202 goto err_out5;
203 }
204 IRDA_MESSAGE("IrDA: Registered device %s (irport io=0x%X irq=%d)\n",
205 dev->name, iobase, irq);
206
207 return self;
208 err_out5:
209 kfree(self->tx_buff.head);
210 err_out4:
211 kfree_skb(self->rx_buff.skb);
212 err_out3:
213 free_netdev(dev);
214 dev_self[i] = NULL;
215 err_out2:
216 release_region(iobase, IO_EXTENT);
217 err_out1:
218 return NULL;
219}
220
221static int irport_close(struct irport_cb *self)
222{
223 IRDA_ASSERT(self != NULL, return -1;);
224
225 /* We are not using any dongle anymore! */
226 if (self->dongle)
227 irda_device_dongle_cleanup(self->dongle);
228 self->dongle = NULL;
229
230 /* Remove netdevice */
231 unregister_netdev(self->netdev);
232
233 /* Release the IO-port that this driver is using */
234 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
235 __FUNCTION__, self->io.sir_base);
236 release_region(self->io.sir_base, self->io.sir_ext);
237
238 if (self->tx_buff.head)
239 kfree(self->tx_buff.head);
240
241 if (self->rx_buff.skb)
242 kfree_skb(self->rx_buff.skb);
243 self->rx_buff.skb = NULL;
244
245 /* Remove ourselves */
246 dev_self[self->index] = NULL;
247 free_netdev(self->netdev);
248
249 return 0;
250}
251
252static void irport_stop(struct irport_cb *self)
253{
254 int iobase;
255
256 iobase = self->io.sir_base;
257
258 /* We can't lock, we may be called from a FIR driver - Jean II */
259
260 /* We are not transmitting any more */
261 self->transmitting = 0;
262
263 /* Reset UART */
264 outb(0, iobase+UART_MCR);
265
266 /* Turn off interrupts */
267 outb(0, iobase+UART_IER);
268}
269
270static void irport_start(struct irport_cb *self)
271{
272 int iobase;
273
274 iobase = self->io.sir_base;
275
276 irport_stop(self);
277
278 /* We can't lock, we may be called from a FIR driver - Jean II */
279
280 /* Initialize UART */
281 outb(UART_LCR_WLEN8, iobase+UART_LCR); /* Reset DLAB */
282 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
283
284	/* Turn on interrupts */
285 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, iobase+UART_IER);
286}
287
288/*
289 * Function irport_probe (void)
290 *
291 * Start IO port
292 *
293 */
294int irport_probe(int iobase)
295{
296 IRDA_DEBUG(4, "%s(), iobase=%#x\n", __FUNCTION__, iobase);
297
298 return 0;
299}
300
301/*
302 * Function irport_get_fcr (speed)
303 *
304 * Compute value of fcr
305 *
306 */
307static inline unsigned int irport_get_fcr(__u32 speed)
308{
309 unsigned int fcr; /* FIFO control reg */
310
311 /* Enable fifos */
312 fcr = UART_FCR_ENABLE_FIFO;
313
314 /*
315	 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
316	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
317 * about this timeout since it will always be fast enough.
318 */
319 if (speed < 38400)
320 fcr |= UART_FCR_TRIGGER_1;
321 else
322 //fcr |= UART_FCR_TRIGGER_14;
323 fcr |= UART_FCR_TRIGGER_8;
324
325 return(fcr);
326}
327
328/*
329 * Function irport_change_speed (self, speed)
330 *
331 * Set speed of IrDA port to specified baudrate
332 *
333 * This function should be called with irqs off and the spin-lock held.
334 */
335static void irport_change_speed(void *priv, __u32 speed)
336{
337 struct irport_cb *self = (struct irport_cb *) priv;
338 int iobase;
339 unsigned int fcr; /* FIFO control reg */
340 unsigned int lcr; /* Line control reg */
341 int divisor;
342
343 IRDA_ASSERT(self != NULL, return;);
344 IRDA_ASSERT(speed != 0, return;);
345
346 IRDA_DEBUG(1, "%s(), Setting speed to: %d - iobase=%#x\n",
347 __FUNCTION__, speed, self->io.sir_base);
348
349 /* We can't lock, we may be called from a FIR driver - Jean II */
350
351 iobase = self->io.sir_base;
352
353 /* Update accounting for new speed */
354 self->io.speed = speed;
355
356 /* Turn off interrupts */
357 outb(0, iobase+UART_IER);
358
359 divisor = SPEED_MAX/speed;
360
361 /* Get proper fifo configuration */
362 fcr = irport_get_fcr(speed);
363
364 /* IrDA ports use 8N1 */
365 lcr = UART_LCR_WLEN8;
366
367 outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
368 outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
369 outb(divisor >> 8, iobase+UART_DLM);
370 outb(lcr, iobase+UART_LCR); /* Set 8N1 */
371 outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
372
373	/* Turn on interrupts */
374	/* Doing it here would generate a fatal interrupt storm.
375	 * People calling us will do that properly - Jean II */
376 //outb(/*UART_IER_RLSI|*/UART_IER_RDI/*|UART_IER_THRI*/, iobase+UART_IER);
377}
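
To make the divisor programming above concrete, a worked example (illustrative only), assuming the usual 115200 bps base rate that SPEED_MAX in irport.h encodes:

/* Worked example, not driver code: the value written to UART_DLL/DLM
 * above is SPEED_MAX / speed, e.g.
 *   9600 bps   -> divisor 12 (DLL=0x0c, DLM=0x00)
 *   19200 bps  -> divisor 6
 *   38400 bps  -> divisor 3
 *   115200 bps -> divisor 1
 */
static inline int example_uart_divisor(unsigned int speed)
{
	return 115200 / speed;
}
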
378
379/*
380 * Function __irport_change_speed (instance, state, param)
381 *
382 * State machine for changing speed of the device. We do it this way since
383 * we cannot use schedule_timeout() when we are in interrupt context
384 *
385 */
386int __irport_change_speed(struct irda_task *task)
387{
388 struct irport_cb *self;
389 __u32 speed = (__u32) task->param;
390 unsigned long flags = 0;
391 int wasunlocked = 0;
392 int ret = 0;
393
394 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
395
396 self = (struct irport_cb *) task->instance;
397
398 IRDA_ASSERT(self != NULL, return -1;);
399
400 /* Locking notes : this function may be called from irq context with
401 * spinlock, via irport_write_wakeup(), or from non-interrupt without
402 * spinlock (from the task timer). Yuck !
403	 * This is ugly, and unsafe if the spinlock is not already acquired.
404	 * This will be fixed when irda-task gets rewritten.
405 * Jean II */
406 if (!spin_is_locked(&self->lock)) {
407 spin_lock_irqsave(&self->lock, flags);
408 wasunlocked = 1;
409 }
410
411 switch (task->state) {
412 case IRDA_TASK_INIT:
413 case IRDA_TASK_WAIT:
414 /* Are we ready to change speed yet? */
415 if (self->tx_buff.len > 0) {
416 task->state = IRDA_TASK_WAIT;
417
418 /* Try again later */
419 ret = msecs_to_jiffies(20);
420 break;
421 }
422
423 if (self->dongle)
424 irda_task_next_state(task, IRDA_TASK_CHILD_INIT);
425 else
426 irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
427 break;
428 case IRDA_TASK_CHILD_INIT:
429 /* Go to default speed */
430 self->change_speed(self->priv, 9600);
431
432 /* Change speed of dongle */
433 if (irda_task_execute(self->dongle,
434 self->dongle->issue->change_speed,
435 NULL, task, (void *) speed))
436 {
437			/* Dongle needs more time to change its speed */
438 irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
439
440 /* Give dongle 1 sec to finish */
441 ret = msecs_to_jiffies(1000);
442 } else
443 /* Child finished immediately */
444 irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
445 break;
446 case IRDA_TASK_CHILD_WAIT:
447 IRDA_WARNING("%s(), changing speed of dongle timed out!\n", __FUNCTION__);
448 ret = -1;
449 break;
450 case IRDA_TASK_CHILD_DONE:
451 /* Finally we are ready to change the speed */
452 self->change_speed(self->priv, speed);
453
454 irda_task_next_state(task, IRDA_TASK_DONE);
455 break;
456 default:
457 IRDA_ERROR("%s(), unknown state %d\n",
458 __FUNCTION__, task->state);
459 irda_task_next_state(task, IRDA_TASK_DONE);
460 ret = -1;
461 break;
462 }
463	/* Put things back in the state we found them - Jean II */
464 if(wasunlocked) {
465 spin_unlock_irqrestore(&self->lock, flags);
466 }
467
468 return ret;
469}
470
471/*
472 * Function irport_change_speed_complete (task)
473 *
474 * Called when the change speed operation completes
475 *
476 */
477static int irport_change_speed_complete(struct irda_task *task)
478{
479 struct irport_cb *self;
480
481 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
482
483 self = (struct irport_cb *) task->instance;
484
485 IRDA_ASSERT(self != NULL, return -1;);
486 IRDA_ASSERT(self->netdev != NULL, return -1;);
487
488 /* Finished changing speed, so we are not busy any longer */
489 /* Signal network layer so it can try to send the frame */
490
491 netif_wake_queue(self->netdev);
492
493 return 0;
494}
495
496/*
497 * Function irport_timeout (struct net_device *dev)
498 *
499 * The networking layer thinks we timed out.
500 *
501 */
502
503static void irport_timeout(struct net_device *dev)
504{
505 struct irport_cb *self;
506 int iobase;
507 int iir, lsr;
508 unsigned long flags;
509
510 self = (struct irport_cb *) dev->priv;
511 IRDA_ASSERT(self != NULL, return;);
512 iobase = self->io.sir_base;
513
514 IRDA_WARNING("%s: transmit timed out, jiffies = %ld, trans_start = %ld\n",
515 dev->name, jiffies, dev->trans_start);
516 spin_lock_irqsave(&self->lock, flags);
517
518 /* Debug what's happening... */
519
520 /* Get interrupt status */
521 lsr = inb(iobase+UART_LSR);
522 /* Read interrupt register */
523 iir = inb(iobase+UART_IIR);
524 IRDA_DEBUG(0, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
525 __FUNCTION__, iir, lsr, iobase);
526
527 IRDA_DEBUG(0, "%s(), transmitting=%d, remain=%d, done=%d\n",
528 __FUNCTION__, self->transmitting, self->tx_buff.len,
529 self->tx_buff.data - self->tx_buff.head);
530
531 /* Now, restart the port */
532 irport_start(self);
533 self->change_speed(self->priv, self->io.speed);
534 /* This will re-enable irqs */
535 outb(/*UART_IER_RLSI|*/UART_IER_RDI/*|UART_IER_THRI*/, iobase+UART_IER);
536 dev->trans_start = jiffies;
537 spin_unlock_irqrestore(&self->lock, flags);
538
539 netif_wake_queue(dev);
540}
541
542/*
543 * Function irport_wait_hw_transmitter_finish ()
544 *
545 * Wait for the real end of HW transmission
546 *
547 * The UART is a strict FIFO, and we get called only when we have finished
548 * pushing data to the FIFO, so the maximum amount of time we must wait
549 * is only for the FIFO to drain out.
550 *
551 * We use a simple calibrated loop. We may need to adjust the loop
552 * delay (udelay) to balance I/O traffic and latency. And we also need to
553 * adjust the maximum timeout.
554 * It would probably be better to wait for the proper interrupt,
555 * but it doesn't seem to be available.
556 *
557 * We can't use jiffies or kernel timers because :
558 * 1) We are called from the interrupt handler, which disables softirqs,
559 * so jiffies won't be increased
560 * 2) Jiffies granularity is usually very coarse (10ms), and we don't
561 * want to wait that long to detect stuck hardware.
562 * Jean II
563 */
564
565static void irport_wait_hw_transmitter_finish(struct irport_cb *self)
566{
567 int iobase;
568 int count = 1000; /* 1 ms */
569
570 iobase = self->io.sir_base;
571
572 /* Calibrated busy loop */
573 while((count-- > 0) && !(inb(iobase+UART_LSR) & UART_LSR_TEMT))
574 udelay(1);
575
576 if(count == 0)
577 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__);
578}
579
580/*
581 * Function irport_hard_start_xmit (struct sk_buff *skb, struct net_device *dev)
582 *
583 *    Transmits the current frame until the FIFO is full, then
584 *    waits until the next transmit interrupt, and continues until the
585 * frame is transmitted.
586 */
587static int irport_hard_xmit(struct sk_buff *skb, struct net_device *dev)
588{
589 struct irport_cb *self;
590 unsigned long flags;
591 int iobase;
592 s32 speed;
593
594 IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
595
596 IRDA_ASSERT(dev != NULL, return 0;);
597
598 self = (struct irport_cb *) dev->priv;
599 IRDA_ASSERT(self != NULL, return 0;);
600
601 iobase = self->io.sir_base;
602
603 netif_stop_queue(dev);
604
605 /* Make sure tests & speed change are atomic */
606 spin_lock_irqsave(&self->lock, flags);
607
608 /* Check if we need to change the speed */
609 speed = irda_get_next_speed(skb);
610 if ((speed != self->io.speed) && (speed != -1)) {
611 /* Check for empty frame */
612 if (!skb->len) {
613 /*
614 * We send frames one by one in SIR mode (no
615 * pipelining), so at this point, if we were sending
616 * a previous frame, we just received the interrupt
617 * telling us it is finished (UART_IIR_THRI).
618 * Therefore, waiting for the transmitter to really
619 * finish draining the fifo won't take too long.
620 * And the interrupt handler is not expected to run.
621 * - Jean II */
622 irport_wait_hw_transmitter_finish(self);
623 /* Better go there already locked - Jean II */
624 irda_task_execute(self, __irport_change_speed,
625 irport_change_speed_complete,
626 NULL, (void *) speed);
627 dev->trans_start = jiffies;
628 spin_unlock_irqrestore(&self->lock, flags);
629 dev_kfree_skb(skb);
630 return 0;
631 } else
632 self->new_speed = speed;
633 }
634
635 /* Init tx buffer */
636 self->tx_buff.data = self->tx_buff.head;
637
638 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
639 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
640 self->tx_buff.truesize);
641
642 self->stats.tx_bytes += self->tx_buff.len;
643
644 /* We are transmitting */
645 self->transmitting = 1;
646
647 /* Turn on transmit finished interrupt. Will fire immediately! */
648 outb(UART_IER_THRI, iobase+UART_IER);
649
650 dev->trans_start = jiffies;
651 spin_unlock_irqrestore(&self->lock, flags);
652
653 dev_kfree_skb(skb);
654
655 return 0;
656}
657
658/*
659 * Function irport_write (driver)
660 *
661 * Fill Tx FIFO with transmit data
662 *
663 * Called only from irport_write_wakeup()
664 */
665static inline int irport_write(int iobase, int fifo_size, __u8 *buf, int len)
666{
667 int actual = 0;
668
669 /* Fill FIFO with current frame */
670 while ((actual < fifo_size) && (actual < len)) {
671 /* Transmit next byte */
672 outb(buf[actual], iobase+UART_TX);
673
674 actual++;
675 }
676
677 return actual;
678}
679
680/*
681 * Function irport_write_wakeup (tty)
682 *
683 * Called by the driver when there's room for more data. If we have
684 * more packets to send, we send them here.
685 *
686 * Called only from irport_interrupt()
687 * Make sure this function is *not* called while we are receiving,
688 * otherwise we will reset the FIFO and lose data :-(
689 */
690static inline void irport_write_wakeup(struct irport_cb *self)
691{
692 int actual = 0;
693 int iobase;
694 unsigned int fcr;
695
696 IRDA_ASSERT(self != NULL, return;);
697
698 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
699
700 iobase = self->io.sir_base;
701
702 /* Finished with frame? */
703 if (self->tx_buff.len > 0) {
704 /* Write data left in transmit buffer */
705 actual = irport_write(iobase, self->io.fifo_size,
706 self->tx_buff.data, self->tx_buff.len);
707 self->tx_buff.data += actual;
708 self->tx_buff.len -= actual;
709 } else {
710 /*
711		 * Now the serial buffer is almost free & we can start
712 * transmission of another packet. But first we must check
713 * if we need to change the speed of the hardware
714 */
715 if (self->new_speed) {
716 irport_wait_hw_transmitter_finish(self);
717 irda_task_execute(self, __irport_change_speed,
718 irport_change_speed_complete,
719 NULL, (void *) self->new_speed);
720 self->new_speed = 0;
721 } else {
722 /* Tell network layer that we want more frames */
723 netif_wake_queue(self->netdev);
724 }
725 self->stats.tx_packets++;
726
727 /*
728 * Reset Rx FIFO to make sure that all reflected transmit data
729 * is discarded. This is needed for half duplex operation
730 */
731 fcr = irport_get_fcr(self->io.speed);
732 fcr |= UART_FCR_CLEAR_RCVR;
733 outb(fcr, iobase+UART_FCR);
734
735 /* Finished transmitting */
736 self->transmitting = 0;
737
738 /* Turn on receive interrupts */
739 outb(UART_IER_RDI, iobase+UART_IER);
740
741 IRDA_DEBUG(1, "%s() : finished Tx\n", __FUNCTION__);
742 }
743}
744
745/*
746 * Function irport_receive (self)
747 *
748 * Receive one frame from the infrared port
749 *
750 * Called only from irport_interrupt()
751 */
752static inline void irport_receive(struct irport_cb *self)
753{
754 int boguscount = 0;
755 int iobase;
756
757 IRDA_ASSERT(self != NULL, return;);
758
759 iobase = self->io.sir_base;
760
761 /*
762 * Receive all characters in Rx FIFO, unwrap and unstuff them.
763 * async_unwrap_char will deliver all found frames
764 */
765 do {
766 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
767 inb(iobase+UART_RX));
768
769 /* Make sure we don't stay here too long */
770 if (boguscount++ > 32) {
771 IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__);
772 break;
773 }
774 } while (inb(iobase+UART_LSR) & UART_LSR_DR);
775}
776
777/*
778 * Function irport_interrupt (irq, dev_id, regs)
779 *
780 * Interrupt handler
781 */
782static irqreturn_t irport_interrupt(int irq, void *dev_id,
783 struct pt_regs *regs)
784{
785 struct net_device *dev = (struct net_device *) dev_id;
786 struct irport_cb *self;
787 int boguscount = 0;
788 int iobase;
789 int iir, lsr;
790 int handled = 0;
791
792 if (!dev) {
793 IRDA_WARNING("%s() irq %d for unknown device.\n", __FUNCTION__, irq);
794 return IRQ_NONE;
795 }
796 self = (struct irport_cb *) dev->priv;
797
798 spin_lock(&self->lock);
799
800 iobase = self->io.sir_base;
801
802 /* Cut'n'paste interrupt routine from serial.c
803	 * This version tries to minimise latency and I/O operations.
804 * Simplified and modified to enforce half duplex operation.
805 * - Jean II */
806
807	/* Check status even if the iir reg is cleared; more robust and
808	 * eliminates a read on the I/O bus - Jean II */
809 do {
810 /* Get interrupt status ; Clear interrupt */
811 lsr = inb(iobase+UART_LSR);
812
813 /* Are we receiving or transmitting ? */
814 if(!self->transmitting) {
815 /* Received something ? */
816 if (lsr & UART_LSR_DR)
817 irport_receive(self);
818 } else {
819 /* Room in Tx fifo ? */
820 if (lsr & (UART_LSR_THRE | UART_LSR_TEMT))
821 irport_write_wakeup(self);
822 }
823
824 /* A bit hackish, but working as expected... Jean II */
825 if(lsr & (UART_LSR_THRE | UART_LSR_TEMT | UART_LSR_DR))
826 handled = 1;
827
828		/* Make sure we don't stay here too long */
829 if (boguscount++ > 10) {
830 IRDA_WARNING("%s() irq handler looping : lsr=%02x\n",
831 __FUNCTION__, lsr);
832 break;
833 }
834
835 /* Read interrupt register */
836 iir = inb(iobase+UART_IIR);
837
838		/* Enable this debug only when no other options are enabled and at low
839 * bit rates, otherwise it may cause Rx overruns (lsr=63).
840 * - Jean II */
841 IRDA_DEBUG(6, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
842 __FUNCTION__, iir, lsr, iobase);
843
844 /* As long as interrupt pending... */
845 } while ((iir & UART_IIR_NO_INT) == 0);
846
847 spin_unlock(&self->lock);
848 return IRQ_RETVAL(handled);
849}
850
851/*
852 * Function irport_net_open (dev)
853 *
854 * Network device is taken up. Usually this is done by "ifconfig irda0 up"
855 *
856 */
857static int irport_net_open(struct net_device *dev)
858{
859 struct irport_cb *self;
860 int iobase;
861 char hwname[16];
862 unsigned long flags;
863
864 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
865
866 IRDA_ASSERT(dev != NULL, return -1;);
867 self = (struct irport_cb *) dev->priv;
868
869 iobase = self->io.sir_base;
870
871 if (request_irq(self->io.irq, self->interrupt, 0, dev->name,
872 (void *) dev)) {
873 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
874 __FUNCTION__, self->io.irq);
875 return -EAGAIN;
876 }
877
878 spin_lock_irqsave(&self->lock, flags);
879 /* Init uart */
880 irport_start(self);
881	/* Set 9600 baud by default, including at the dongle */
882 irda_task_execute(self, __irport_change_speed,
883 irport_change_speed_complete,
884 NULL, (void *) 9600);
885 spin_unlock_irqrestore(&self->lock, flags);
886
887
888 /* Give self a hardware name */
889 sprintf(hwname, "SIR @ 0x%03x", self->io.sir_base);
890
891 /*
892 * Open new IrLAP layer instance, now that everything should be
893 * initialized properly
894 */
895 self->irlap = irlap_open(dev, &self->qos, hwname);
896
897 /* Ready to play! */
898
899 netif_start_queue(dev);
900
901 return 0;
902}
903
904/*
905 * Function irport_net_close (self)
906 *
907 * Network device is taken down. Usually this is done by
908 * "ifconfig irda0 down"
909 */
910static int irport_net_close(struct net_device *dev)
911{
912 struct irport_cb *self;
913 int iobase;
914 unsigned long flags;
915
916 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
917
918 IRDA_ASSERT(dev != NULL, return -1;);
919 self = (struct irport_cb *) dev->priv;
920
921 IRDA_ASSERT(self != NULL, return -1;);
922
923 iobase = self->io.sir_base;
924
925 /* Stop device */
926 netif_stop_queue(dev);
927
928 /* Stop and remove instance of IrLAP */
929 if (self->irlap)
930 irlap_close(self->irlap);
931 self->irlap = NULL;
932
933 spin_lock_irqsave(&self->lock, flags);
934 irport_stop(self);
935 spin_unlock_irqrestore(&self->lock, flags);
936
937 free_irq(self->io.irq, dev);
938
939 return 0;
940}
941
942/*
943 * Function irport_is_receiving (self)
944 *
945 *    Returns true if we are currently receiving data
946 *
947 */
948static inline int irport_is_receiving(struct irport_cb *self)
949{
950 return (self->rx_buff.state != OUTSIDE_FRAME);
951}
952
953/*
954 * Function irport_set_dtr_rts (tty, dtr, rts)
955 *
956 * This function can be used by dongles etc. to set or reset the status
957 * of the dtr and rts lines
958 */
959static int irport_set_dtr_rts(struct net_device *dev, int dtr, int rts)
960{
961 struct irport_cb *self = dev->priv;
962 int iobase;
963
964 IRDA_ASSERT(self != NULL, return -1;);
965
966 iobase = self->io.sir_base;
967
968 if (dtr)
969 dtr = UART_MCR_DTR;
970 if (rts)
971 rts = UART_MCR_RTS;
972
973 outb(dtr|rts|UART_MCR_OUT2, iobase+UART_MCR);
974
975 return 0;
976}
977
978static int irport_raw_write(struct net_device *dev, __u8 *buf, int len)
979{
980 struct irport_cb *self = (struct irport_cb *) dev->priv;
981 int actual = 0;
982 int iobase;
983
984 IRDA_ASSERT(self != NULL, return -1;);
985
986 iobase = self->io.sir_base;
987
988 /* Tx FIFO should be empty! */
989 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
990 IRDA_DEBUG( 0, "%s(), failed, fifo not empty!\n", __FUNCTION__);
991 return -1;
992 }
993
994 /* Fill FIFO with current frame */
995 while (actual < len) {
996 /* Transmit next byte */
997 outb(buf[actual], iobase+UART_TX);
998 actual++;
999 }
1000
1001 return actual;
1002}
1003
1004/*
1005 * Function irport_net_ioctl (dev, rq, cmd)
1006 *
1007 * Process IOCTL commands for this device
1008 *
1009 */
1010static int irport_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1011{
1012 struct if_irda_req *irq = (struct if_irda_req *) rq;
1013 struct irport_cb *self;
1014 dongle_t *dongle;
1015 unsigned long flags;
1016 int ret = 0;
1017
1018 IRDA_ASSERT(dev != NULL, return -1;);
1019
1020 self = dev->priv;
1021
1022 IRDA_ASSERT(self != NULL, return -1;);
1023
1024 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
1025
1026 switch (cmd) {
1027 case SIOCSBANDWIDTH: /* Set bandwidth */
1028 if (!capable(CAP_NET_ADMIN))
1029 ret = -EPERM;
1030 else
1031 irda_task_execute(self, __irport_change_speed, NULL,
1032 NULL, (void *) irq->ifr_baudrate);
1033 break;
1034 case SIOCSDONGLE: /* Set dongle */
1035 if (!capable(CAP_NET_ADMIN)) {
1036 ret = -EPERM;
1037 break;
1038 }
1039
1040 /* Locking :
1041 * irda_device_dongle_init() can't be locked.
1042 * irda_task_execute() doesn't need to be locked.
1043 * Jean II
1044 */
1045
1046 /* Initialize dongle */
1047 dongle = irda_device_dongle_init(dev, irq->ifr_dongle);
1048 if (!dongle)
1049 break;
1050
1051 dongle->set_mode = NULL;
1052 dongle->read = NULL;
1053 dongle->write = irport_raw_write;
1054 dongle->set_dtr_rts = irport_set_dtr_rts;
1055
1056 /* Now initialize the dongle! */
1057 dongle->issue->open(dongle, &self->qos);
1058
1059 /* Reset dongle */
1060 irda_task_execute(dongle, dongle->issue->reset, NULL, NULL,
1061 NULL);
1062
1063 /* Make dongle available to driver only now to avoid
1064 * race conditions - Jean II */
1065 self->dongle = dongle;
1066 break;
1067 case SIOCSMEDIABUSY: /* Set media busy */
1068 if (!capable(CAP_NET_ADMIN)) {
1069 ret = -EPERM;
1070 break;
1071 }
1072
1073 irda_device_set_media_busy(self->netdev, TRUE);
1074 break;
1075 case SIOCGRECEIVING: /* Check if we are receiving right now */
1076 irq->ifr_receiving = irport_is_receiving(self);
1077 break;
1078 case SIOCSDTRRTS:
1079 if (!capable(CAP_NET_ADMIN)) {
1080 ret = -EPERM;
1081 break;
1082 }
1083
1084 /* No real need to lock... */
1085 spin_lock_irqsave(&self->lock, flags);
1086 irport_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
1087 spin_unlock_irqrestore(&self->lock, flags);
1088 break;
1089 default:
1090 ret = -EOPNOTSUPP;
1091 }
1092
1093 return ret;
1094}
1095
1096static struct net_device_stats *irport_net_get_stats(struct net_device *dev)
1097{
1098 struct irport_cb *self = (struct irport_cb *) dev->priv;
1099
1100 return &self->stats;
1101}
1102
1103static int __init irport_init(void)
1104{
1105 int i;
1106
1107 for (i=0; (io[i] < 2000) && (i < 4); i++) {
1108 if (irport_open(i, io[i], irq[i]) != NULL)
1109 return 0;
1110 }
1111 /*
1112 * Maybe something failed, but we can still be usable for FIR drivers
1113 */
1114 return 0;
1115}
1116
1117/*
1118 * Function irport_cleanup ()
1119 *
1120 * Close all configured ports
1121 *
1122 */
1123static void __exit irport_cleanup(void)
1124{
1125 int i;
1126
1127 IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
1128
1129 for (i=0; i < 4; i++) {
1130 if (dev_self[i])
1131 irport_close(dev_self[i]);
1132 }
1133}
1134
1135MODULE_PARM(io, "1-4i");
1136MODULE_PARM_DESC(io, "Base I/O addresses");
1137MODULE_PARM(irq, "1-4i");
1138MODULE_PARM_DESC(irq, "IRQ lines");
1139
1140MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1141MODULE_DESCRIPTION("Half duplex serial driver for IrDA SIR mode");
1142MODULE_LICENSE("GPL");
1143
1144module_init(irport_init);
1145module_exit(irport_cleanup);
1146
diff --git a/drivers/net/irda/irport.h b/drivers/net/irda/irport.h
new file mode 100644
index 000000000000..fc89c8c3dd7f
--- /dev/null
+++ b/drivers/net/irda/irport.h
@@ -0,0 +1,80 @@
1/*********************************************************************
2 *
3 * Filename: irport.h
4 * Version: 0.1
5 * Description: Serial driver for IrDA
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sun Aug 3 13:49:59 1997
9 * Modified at: Fri Jan 14 10:21:10 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1997, 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * All Rights Reserved.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 *     Neither Dag Brattli nor University of Tromsø admit liability nor
21 * provide warranty for any of this software. This material is
22 * provided "AS-IS" and at no charge.
23 *
24 ********************************************************************/
25
26#ifndef IRPORT_H
27#define IRPORT_H
28
29#include <linux/netdevice.h>
30#include <linux/skbuff.h>
31#include <linux/types.h>
32#include <linux/spinlock.h>
33
34#include <net/irda/irda_device.h>
35
36#define SPEED_DEFAULT 9600
37#define SPEED_MAX 115200
38
39/*
40 * These are the supported serial types.
41 */
42#define PORT_UNKNOWN 0
43#define PORT_8250 1
44#define PORT_16450 2
45#define PORT_16550 3
46#define PORT_16550A 4
47#define PORT_CIRRUS 5
48#define PORT_16650 6
49#define PORT_MAX 6
50
51#define FRAME_MAX_SIZE 2048
52
53struct irport_cb {
54 struct net_device *netdev; /* Yes! we are some kind of netdevice */
55 struct net_device_stats stats;
56
57 struct irlap_cb *irlap; /* The link layer we are attached to */
58
59 chipio_t io; /* IrDA controller information */
60 iobuff_t tx_buff; /* Transmit buffer */
61 iobuff_t rx_buff; /* Receive buffer */
62
63 struct qos_info qos; /* QoS capabilities for this device */
64 dongle_t *dongle; /* Dongle driver */
65
66 __u32 flags; /* Interface flags */
67 __u32 new_speed;
68 int mode;
69 int index; /* Instance index */
70 int transmitting; /* Are we transmitting ? */
71
72 spinlock_t lock; /* For serializing operations */
73
74 /* For piggyback drivers */
75 void *priv;
76 void (*change_speed)(void *priv, __u32 speed);
77 int (*interrupt)(int irq, void *dev_id, struct pt_regs *regs);
78};
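
As the "For piggyback drivers" hooks above suggest, a FIR driver can reuse irport for SIR operation by overriding priv and change_speed (and, if needed, interrupt). A hypothetical sketch, with made-up names, of what such an override might look like:

#include "irport.h"

/* Hypothetical sketch only: not taken from any existing FIR driver. */
static void example_fir_change_speed(void *priv, __u32 speed)
{
	/* put the FIR chip into SIR mode at 'speed' ... */
}

static void example_fir_attach(struct irport_cb *port, void *fir_state)
{
	port->priv = fir_state;
	port->change_speed = example_fir_change_speed;
}
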
79
80#endif /* IRPORT_H */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
new file mode 100644
index 000000000000..7d23aa375908
--- /dev/null
+++ b/drivers/net/irda/irtty-sir.c
@@ -0,0 +1,642 @@
1/*********************************************************************
2 *
3 * Filename: irtty-sir.c
4 * Version: 2.0
5 * Description: IrDA line discipline implementation
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Tue Dec 9 21:18:38 1997
9 * Modified at: Sun Oct 27 22:13:30 2002
10 * Modified by: Martin Diehl <mad@mdiehl.de>
11 * Sources: slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
12 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
13 *
14 * Copyright (c) 1998-2000 Dag Brattli,
15 * Copyright (c) 2002 Martin Diehl,
16 * All Rights Reserved.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License as
20 * published by the Free Software Foundation; either version 2 of
21 * the License, or (at your option) any later version.
22 *
23 *     Neither Dag Brattli nor University of Tromsø admit liability nor
24 * provide warranty for any of this software. This material is
25 * provided "AS-IS" and at no charge.
26 *
27 ********************************************************************/
28
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/tty.h>
32#include <linux/init.h>
33#include <asm/uaccess.h>
34#include <linux/smp_lock.h>
35#include <linux/delay.h>
36
37#include <net/irda/irda.h>
38#include <net/irda/irda_device.h>
39
40#include "sir-dev.h"
41#include "irtty-sir.h"
42
43static int qos_mtt_bits = 0x03; /* 5 ms or more */
44
45module_param(qos_mtt_bits, int, 0);
46MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
47
48/* ------------------------------------------------------- */
49
50/* device configuration callbacks always invoked with irda-thread context */
51
52/* find out how many chars we have in buffers below us
53 * this is allowed to lie, i.e. return fewer chars than we
54 * actually have. The returned value is used to determine
55 * how long the irdathread should wait before doing the
56 * real blocking wait_until_sent()
57 */
58
59static int irtty_chars_in_buffer(struct sir_dev *dev)
60{
61 struct sirtty_cb *priv = dev->priv;
62
63 IRDA_ASSERT(priv != NULL, return -1;);
64 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
65
66 return priv->tty->driver->chars_in_buffer(priv->tty);
67}
68
69/* Wait (sleep) until the underlying hardware has finished transmission,
70 * i.e. the hardware buffers are drained.
71 * This must block and not return before all characters are really sent.
72 *
73 * If the tty sits on top of a 16550A-like uart, there are typically
74 * up to 16 bytes in the fifo - e.g. 9600 bps 8N1 needs 16.7 msec
75 *
76 * With usbserial the uart-fifo is basically replaced by the converter's
77 * outgoing endpoint buffer, which can usually hold 64 bytes (at least).
78 * With pl2303 it appears we are safe with 60msec here.
79 *
80 * I really wish all serial drivers would provide a
81 * correct implementation of wait_until_sent()
82 */
83
84#define USBSERIAL_TX_DONE_DELAY 60
85
86static void irtty_wait_until_sent(struct sir_dev *dev)
87{
88 struct sirtty_cb *priv = dev->priv;
89 struct tty_struct *tty;
90
91 IRDA_ASSERT(priv != NULL, return;);
92 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
93
94 tty = priv->tty;
95 if (tty->driver->wait_until_sent) {
96 lock_kernel();
97 tty->driver->wait_until_sent(tty, msecs_to_jiffies(100));
98 unlock_kernel();
99 }
100 else {
101 msleep(USBSERIAL_TX_DONE_DELAY);
102 }
103}
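/* Illustrative sketch, not part of the driver: the 16.7 msec figure in the
 * comment above is simply the time to drain a full 16-byte 16550A FIFO at
 * the line speed, with 10 bits per character for 8N1 framing.
 */
#if 0	/* example only, never compiled */
static unsigned fifo_drain_ms(unsigned fifo_chars, unsigned baud)
{
	/* start + 8 data + stop = 10 bits per char; round up to whole ms */
	return (fifo_chars * 10 * 1000 + baud - 1) / baud;
}
/* fifo_drain_ms(16, 9600) == 17, roughly the 16.7 msec quoted above */
#endif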
104
105/*
106 * Function irtty_change_speed (dev, speed)
107 *
108 * Change the speed of the serial port.
109 *
110 * This may sleep in set_termios (the usbserial driver, for example) and
111 * must therefore not be called from interrupt/timer/tasklet context.
112 * All such invocations are deferred to kIrDAd now so we can sleep there.
113 */
114
115static int irtty_change_speed(struct sir_dev *dev, unsigned speed)
116{
117 struct sirtty_cb *priv = dev->priv;
118 struct tty_struct *tty;
119 struct termios old_termios;
120 int cflag;
121
122 IRDA_ASSERT(priv != NULL, return -1;);
123 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
124
125 tty = priv->tty;
126
127 lock_kernel();
128 old_termios = *(tty->termios);
129 cflag = tty->termios->c_cflag;
130
131 cflag &= ~CBAUD;
132
133 IRDA_DEBUG(2, "%s(), Setting speed to %d\n", __FUNCTION__, speed);
134
135 switch (speed) {
136 case 1200:
137 cflag |= B1200;
138 break;
139 case 2400:
140 cflag |= B2400;
141 break;
142 case 4800:
143 cflag |= B4800;
144 break;
145 case 19200:
146 cflag |= B19200;
147 break;
148 case 38400:
149 cflag |= B38400;
150 break;
151 case 57600:
152 cflag |= B57600;
153 break;
154 case 115200:
155 cflag |= B115200;
156 break;
157 case 9600:
158 default:
159 cflag |= B9600;
160 break;
161 }
162
163 tty->termios->c_cflag = cflag;
164 if (tty->driver->set_termios)
165 tty->driver->set_termios(tty, &old_termios);
166 unlock_kernel();
167
168 priv->io.speed = speed;
169
170 return 0;
171}
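/* Illustrative sketch, not part of the driver: the speed switch above could
 * also be written as a small lookup table from IrDA speed to termios CBAUD
 * flag, with 9600/B9600 doubling as the fallback entry.
 */
#if 0	/* example only, never compiled */
static const struct { unsigned speed; unsigned cflag; } irtty_baud_map[] = {
	{ 115200, B115200 }, { 57600, B57600 }, { 38400, B38400 },
	{  19200, B19200  }, {   4800, B4800 }, {   2400, B2400 },
	{   1200, B1200   }, {   9600, B9600 },	/* last entry is the default */
};
#endif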
172
173/*
174 * Function irtty_set_dtr_rts (dev, dtr, rts)
175 *
176 * This function can be used by dongles etc. to set or reset the status
177 * of the dtr and rts lines
178 */
179
180static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
181{
182 struct sirtty_cb *priv = dev->priv;
183 int set = 0;
184 int clear = 0;
185
186 IRDA_ASSERT(priv != NULL, return -1;);
187 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
188
189 if (rts)
190 set |= TIOCM_RTS;
191 else
192 clear |= TIOCM_RTS;
193 if (dtr)
194 set |= TIOCM_DTR;
195 else
196 clear |= TIOCM_DTR;
197
198 /*
199 * We can't use ioctl() because it expects a non-null file structure,
200 * and we don't have that here.
201 * This function is not yet defined for all tty drivers, so
202 * let's be careful... Jean II
203 */
204 IRDA_ASSERT(priv->tty->driver->tiocmset != NULL, return -1;);
205 priv->tty->driver->tiocmset(priv->tty, NULL, set, clear);
206
207 return 0;
208}
209
210/* ------------------------------------------------------- */
211
212/* called from sir_dev when there is more data to send
213 * context is either netdev->hard_xmit or some transmit-completion bh
214 * i.e. we are under spinlock here and must not sleep.
215 */
216
217static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len)
218{
219 struct sirtty_cb *priv = dev->priv;
220 struct tty_struct *tty;
221 int writelen;
222
223 IRDA_ASSERT(priv != NULL, return -1;);
224 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
225
226 tty = priv->tty;
227 if (!tty->driver->write)
228 return 0;
229 tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
230 if (tty->driver->write_room) {
231 writelen = tty->driver->write_room(tty);
232 if (writelen > len)
233 writelen = len;
234 }
235 else
236 writelen = len;
237 return tty->driver->write(tty, ptr, writelen);
238}
239
240/* ------------------------------------------------------- */
241
242/* irda line discipline callbacks */
243
244/*
245 * Function irtty_receive_buf (tty, cp, fp, count)
246 *
247 * Handle the 'receiver data ready' interrupt. This function is called
248 * by the 'tty_io' module in the kernel when a block of IrDA data has
249 * been received, which can now be decapsulated and delivered for
250 * further processing
251 *
252 * calling context depends on underlying driver and tty->low_latency!
253 * for example (low_latency: 1 / 0):
254 * serial.c: uart-interrupt / softint
255 * usbserial: urb-complete-interrupt / softint
256 */
257
258static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
259 char *fp, int count)
260{
261 struct sir_dev *dev;
262 struct sirtty_cb *priv = tty->disc_data;
263 int i;
264
265 IRDA_ASSERT(priv != NULL, return;);
266 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
267
268 if (unlikely(count==0)) /* yes, this happens */
269 return;
270
271 dev = priv->dev;
272 if (!dev) {
273 IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
274 return;
275 }
276
277 for (i = 0; i < count; i++) {
278 /*
279 * Characters received with a parity error, etc?
280 */
281 if (fp && *fp++) {
282 IRDA_DEBUG(0, "Framing or parity error!\n");
283 sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
284 return;
285 }
286 }
287
288 sirdev_receive(dev, cp, count);
289}
290
291/*
292 * Function irtty_receive_room (tty)
293 *
294 * Used by the TTY to find out how much data we can receive at a time
295 *
296 */
297static int irtty_receive_room(struct tty_struct *tty)
298{
299 struct sirtty_cb *priv = tty->disc_data;
300
301 IRDA_ASSERT(priv != NULL, return 0;);
302 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return 0;);
303
304 return 65536; /* We can handle an infinite amount of data. :-) */
305}
306
307/*
308 * Function irtty_write_wakeup (tty)
309 *
310 * Called by the driver when there's room for more data. If we have
311 * more packets to send, we send them here.
312 *
313 */
314static void irtty_write_wakeup(struct tty_struct *tty)
315{
316 struct sirtty_cb *priv = tty->disc_data;
317
318 IRDA_ASSERT(priv != NULL, return;);
319 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
320
321 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
322
323 if (priv->dev)
324 sirdev_write_complete(priv->dev);
325}
326
327/* ------------------------------------------------------- */
328
329/*
330 * Function irtty_stop_receiver (tty, stop)
331 *
332 */
333
334static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
335{
336 struct termios old_termios;
337 int cflag;
338
339 lock_kernel();
340 old_termios = *(tty->termios);
341 cflag = tty->termios->c_cflag;
342
343 if (stop)
344 cflag &= ~CREAD;
345 else
346 cflag |= CREAD;
347
348 tty->termios->c_cflag = cflag;
349 if (tty->driver->set_termios)
350 tty->driver->set_termios(tty, &old_termios);
351 unlock_kernel();
352}
353
354/*****************************************************************/
355
356/* serialize ldisc open/close with sir_dev */
357static DECLARE_MUTEX(irtty_sem);
358
359/* notifier from sir_dev when irda% device gets opened (ifup) */
360
361static int irtty_start_dev(struct sir_dev *dev)
362{
363 struct sirtty_cb *priv;
364 struct tty_struct *tty;
365
366 /* serialize with ldisc open/close */
367 down(&irtty_sem);
368
369 priv = dev->priv;
370 if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
371 up(&irtty_sem);
372 return -ESTALE;
373 }
374
375 tty = priv->tty;
376
377 if (tty->driver->start)
378 tty->driver->start(tty);
379 /* Make sure we can receive more data */
380 irtty_stop_receiver(tty, FALSE);
381
382 up(&irtty_sem);
383 return 0;
384}
385
386/* notifier from sir_dev when irda% device gets closed (ifdown) */
387
388static int irtty_stop_dev(struct sir_dev *dev)
389{
390 struct sirtty_cb *priv;
391 struct tty_struct *tty;
392
393 /* serialize with ldisc open/close */
394 down(&irtty_sem);
395
396 priv = dev->priv;
397 if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) {
398 up(&irtty_sem);
399 return -ESTALE;
400 }
401
402 tty = priv->tty;
403
404 /* Make sure we don't receive more data */
405 irtty_stop_receiver(tty, TRUE);
406 if (tty->driver->stop)
407 tty->driver->stop(tty);
408
409 up(&irtty_sem);
410
411 return 0;
412}
413
414/* ------------------------------------------------------- */
415
416static struct sir_driver sir_tty_drv = {
417 .owner = THIS_MODULE,
418 .driver_name = "sir_tty",
419 .start_dev = irtty_start_dev,
420 .stop_dev = irtty_stop_dev,
421 .do_write = irtty_do_write,
422 .chars_in_buffer = irtty_chars_in_buffer,
423 .wait_until_sent = irtty_wait_until_sent,
424 .set_speed = irtty_change_speed,
425 .set_dtr_rts = irtty_set_dtr_rts,
426};
427
428/* ------------------------------------------------------- */
429
430/*
431 * Function irtty_ioctl (tty, file, cmd, arg)
432 *
433 * The Swiss army knife of system calls :-)
434 *
435 */
436static int irtty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
437{
438 struct irtty_info { char name[6]; } info;
439 struct sir_dev *dev;
440 struct sirtty_cb *priv = tty->disc_data;
441 int err = 0;
442
443 IRDA_ASSERT(priv != NULL, return -ENODEV;);
444 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EBADR;);
445
446 IRDA_DEBUG(3, "%s(cmd=0x%X)\n", __FUNCTION__, cmd);
447
448 dev = priv->dev;
449 IRDA_ASSERT(dev != NULL, return -1;);
450
451 switch (cmd) {
452 case TCGETS:
453 case TCGETA:
454 err = n_tty_ioctl(tty, file, cmd, arg);
455 break;
456
457 case IRTTY_IOCTDONGLE:
458 /* this call blocks for completion */
459 err = sirdev_set_dongle(dev, (IRDA_DONGLE) arg);
460 break;
461
462 case IRTTY_IOCGET:
463 IRDA_ASSERT(dev->netdev != NULL, return -1;);
464
465 memset(&info, 0, sizeof(info));
466 strncpy(info.name, dev->netdev->name, sizeof(info.name)-1);
467
468 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
469 err = -EFAULT;
470 break;
471 default:
472 err = -ENOIOCTLCMD;
473 break;
474 }
475 return err;
476}
477
478
479/*
480 * Function irtty_open(tty)
481 *
482 * This function is called by the TTY module when the IrDA line
483 * discipline is called for. Because we are sure the tty line exists,
484 * we only have to link it to a free IrDA channel.
485 */
486static int irtty_open(struct tty_struct *tty)
487{
488 struct sir_dev *dev;
489 struct sirtty_cb *priv;
490 int ret = 0;
491
492 /* Module stuff handled via irda_ldisc.owner - Jean II */
493
494 /* First make sure we're not already connected. */
495 if (tty->disc_data != NULL) {
496 priv = tty->disc_data;
497 if (priv && priv->magic == IRTTY_MAGIC) {
498 ret = -EEXIST;
499 goto out;
500 }
501 tty->disc_data = NULL; /* ### */
502 }
503
504 /* stop the underlying driver */
505 irtty_stop_receiver(tty, TRUE);
506 if (tty->driver->stop)
507 tty->driver->stop(tty);
508
509 if (tty->driver->flush_buffer)
510 tty->driver->flush_buffer(tty);
511
512 /* apply mtt override */
513 sir_tty_drv.qos_mtt_bits = qos_mtt_bits;
514
515 /* get a sir device instance for this driver */
516 dev = sirdev_get_instance(&sir_tty_drv, tty->name);
517 if (!dev) {
518 ret = -ENODEV;
519 goto out;
520 }
521
522 /* allocate private device info block */
523 priv = kmalloc(sizeof(*priv), GFP_KERNEL);
524 if (!priv)
525 goto out_put;
526 memset(priv, 0, sizeof(*priv));
527
528 priv->magic = IRTTY_MAGIC;
529 priv->tty = tty;
530 priv->dev = dev;
531
532 /* serialize with start_dev - in case we were racing with ifup */
533 down(&irtty_sem);
534
535 dev->priv = priv;
536 tty->disc_data = priv;
537
538 up(&irtty_sem);
539
540 IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name);
541
542 return 0;
543
544out_put:
545 sirdev_put_instance(dev);
546out:
547 return ret;
548}
549
550/*
551 * Function irtty_close (tty)
552 *
553 * Close down an IrDA channel. This means flushing out any pending queues,
554 * and then restoring the TTY line discipline to what it was before it got
555 * hooked to IrDA (which usually is TTY again).
556 */
557static void irtty_close(struct tty_struct *tty)
558{
559 struct sirtty_cb *priv = tty->disc_data;
560
561 IRDA_ASSERT(priv != NULL, return;);
562 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
563
564 /* Hm, with a dongle attached the dongle driver wants
565 * to close the dongle - which requires the use of
566 * some tty write and/or termios or ioctl operations.
567 * Are we allowed to call those when we have already been asked
568 * to shut down the ldisc?
569 * If not, we should somehow mark the dev as stale.
570 * The question remains how to close the dongle in this case...
571 * For now let's assume we are allowed to issue tty driver calls
572 * until we return here from the ldisc close. I'm just wondering
573 * how this behaves with hotpluggable serial hardware like
574 * rs232-pcmcia card or usb-serial...
575 *
576 * priv->tty = NULL?;
577 */
578
579 /* we are dead now */
580 tty->disc_data = NULL;
581
582 sirdev_put_instance(priv->dev);
583
584 /* Stop tty */
585 irtty_stop_receiver(tty, TRUE);
586 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
587 if (tty->driver->stop)
588 tty->driver->stop(tty);
589
590 kfree(priv);
591
592 IRDA_DEBUG(0, "%s - %s: irda line discipline closed\n", __FUNCTION__, tty->name);
593}
594
595/* ------------------------------------------------------- */
596
597static struct tty_ldisc irda_ldisc = {
598 .magic = TTY_LDISC_MAGIC,
599 .name = "irda",
600 .flags = 0,
601 .open = irtty_open,
602 .close = irtty_close,
603 .read = NULL,
604 .write = NULL,
605 .ioctl = irtty_ioctl,
606 .poll = NULL,
607 .receive_buf = irtty_receive_buf,
608 .receive_room = irtty_receive_room,
609 .write_wakeup = irtty_write_wakeup,
610 .owner = THIS_MODULE,
611};
612
613/* ------------------------------------------------------- */
614
615static int __init irtty_sir_init(void)
616{
617 int err;
618
619 if ((err = tty_register_ldisc(N_IRDA, &irda_ldisc)) != 0)
620 IRDA_ERROR("IrDA: can't register line discipline (err = %d)\n",
621 err);
622 return err;
623}
624
625static void __exit irtty_sir_cleanup(void)
626{
627 int err;
628
629 if ((err = tty_register_ldisc(N_IRDA, NULL))) {
630 IRDA_ERROR("%s(), can't unregister line discipline (err = %d)\n",
631 __FUNCTION__, err);
632 }
633}
634
635module_init(irtty_sir_init);
636module_exit(irtty_sir_cleanup);
637
638MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
639MODULE_DESCRIPTION("IrDA TTY device driver");
640MODULE_ALIAS_LDISC(N_IRDA);
641MODULE_LICENSE("GPL");
642
diff --git a/drivers/net/irda/irtty-sir.h b/drivers/net/irda/irtty-sir.h
new file mode 100644
index 000000000000..b132d8f6eb13
--- /dev/null
+++ b/drivers/net/irda/irtty-sir.h
@@ -0,0 +1,34 @@
1/*********************************************************************
2 *
3 * sir_tty.h: definitions for the irtty_sir client driver (former irtty)
4 *
5 * Copyright (c) 2002 Martin Diehl
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 *
12 ********************************************************************/
13
14#ifndef IRTTYSIR_H
15#define IRTTYSIR_H
16
17#include <net/irda/irda.h>
18#include <net/irda/irda_device.h> // chipio_t
19
20#define IRTTY_IOC_MAGIC 'e'
21#define IRTTY_IOCTDONGLE _IO(IRTTY_IOC_MAGIC, 1)
22#define IRTTY_IOCGET _IOR(IRTTY_IOC_MAGIC, 2, struct irtty_info)
23#define IRTTY_IOC_MAXNR 2
24
25struct sirtty_cb {
26 magic_t magic;
27
28 struct sir_dev *dev;
29 struct tty_struct *tty;
30
31 chipio_t io; /* IrDA controller information */
32};
33
34#endif
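A minimal user-space sketch of how these ioctls are intended to be used (the device path, the dongle number and the locally re-declared constants are illustrative assumptions, not taken from this patch): the line discipline is attached with TIOCSETD, after which IRTTY_IOCTDONGLE selects the dongle driver.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

#define N_IRDA		11		/* IrDA line discipline number (assumed) */
#define IRTTY_IOCTDONGLE _IO('e', 1)	/* mirrors IRTTY_IOC_MAGIC above */

int main(void)
{
	int ldisc = N_IRDA;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* assumed port */

	if (fd < 0)
		return 1;
	ioctl(fd, TIOCSETD, &ldisc);	/* irtty_open() creates the irda%d device */
	ioctl(fd, IRTTY_IOCTDONGLE, 9);	/* e.g. 9 = the MCP2120 dongle type */
	pause();			/* keep the discipline attached */
	return 0;
}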
diff --git a/drivers/net/irda/litelink-sir.c b/drivers/net/irda/litelink-sir.c
new file mode 100644
index 000000000000..73261c54bbfd
--- /dev/null
+++ b/drivers/net/irda/litelink-sir.c
@@ -0,0 +1,209 @@
1/*********************************************************************
2 *
3 * Filename: litelink.c
4 * Version: 1.1
5 * Description: Driver for the Parallax LiteLink dongle
6 * Status: Stable
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Fri May 7 12:50:33 1999
9 * Modified at: Fri Dec 17 09:14:23 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
27 * MA 02111-1307 USA
28 *
29 ********************************************************************/
30
31/*
32 * Modified at: Thu Jan 15 2003
33 * Modified by: Eugene Crosser <crosser@average.org>
34 *
35 * Convert to "new" IRDA infrastructure for kernel 2.6
36 */
37
38#include <linux/module.h>
39#include <linux/delay.h>
40#include <linux/init.h>
41
42#include <net/irda/irda.h>
43
44#include "sir-dev.h"
45
46#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
47#define MAX_DELAY 10000 /* 1 ms */
48
49static int litelink_open(struct sir_dev *dev);
50static int litelink_close(struct sir_dev *dev);
51static int litelink_change_speed(struct sir_dev *dev, unsigned speed);
52static int litelink_reset(struct sir_dev *dev);
53
54/* These are the baudrates supported - 9600 must be last one! */
55static unsigned baud_rates[] = { 115200, 57600, 38400, 19200, 9600 };
56
57static struct dongle_driver litelink = {
58 .owner = THIS_MODULE,
59 .driver_name = "Parallax LiteLink",
60 .type = IRDA_LITELINK_DONGLE,
61 .open = litelink_open,
62 .close = litelink_close,
63 .reset = litelink_reset,
64 .set_speed = litelink_change_speed,
65};
66
67static int __init litelink_sir_init(void)
68{
69 return irda_register_dongle(&litelink);
70}
71
72static void __exit litelink_sir_cleanup(void)
73{
74 irda_unregister_dongle(&litelink);
75}
76
77static int litelink_open(struct sir_dev *dev)
78{
79 struct qos_info *qos = &dev->qos;
80
81 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
82
83 /* Power up dongle */
84 sirdev_set_dtr_rts(dev, TRUE, TRUE);
85
86 /* Set the speeds we can accept */
87 qos->baud_rate.bits &= IR_115200|IR_57600|IR_38400|IR_19200|IR_9600;
88 qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
89 irda_qos_bits_to_value(qos);
90
91 /* irda thread waits 50 msec for power settling */
92
93 return 0;
94}
95
96static int litelink_close(struct sir_dev *dev)
97{
98 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
99
100 /* Power off dongle */
101 sirdev_set_dtr_rts(dev, FALSE, FALSE);
102
103 return 0;
104}
105
106/*
107 * Function litelink_change_speed (task)
108 *
109 * Change speed of the Litelink dongle. To cycle through the available
110 * baud rates, pulse RTS low for a few ms.
111 */
112static int litelink_change_speed(struct sir_dev *dev, unsigned speed)
113{
114 int i;
115
116 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
117
118 /* dongle already reset by irda-thread - current speed (dongle and
119 * port) is the default speed (115200 for litelink!)
120 */
121
122 /* Cycle through available baudrates until we reach the correct one */
123 for (i = 0; baud_rates[i] != speed; i++) {
124
125 /* end-of-list reached due to invalid speed request */
126 if (baud_rates[i] == 9600)
127 break;
128
129 /* Set DTR, clear RTS */
130 sirdev_set_dtr_rts(dev, FALSE, TRUE);
131
132 /* Sleep a minimum of 15 us */
133 udelay(MIN_DELAY);
134
135 /* Set DTR, Set RTS */
136 sirdev_set_dtr_rts(dev, TRUE, TRUE);
137
138 /* Sleep a minimum of 15 us */
139 udelay(MIN_DELAY);
140 }
141
142 dev->speed = baud_rates[i];
143
144 /* an invalid baudrate should not happen - but if it does, we return -EINVAL
145 * and leave the dongle configured for 9600 so the stack has a chance to recover
146 */
147
148 return (dev->speed == speed) ? 0 : -EINVAL;
149}
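/* Illustrative note, not from the original source: the dongle powers up at
 * 115200 and every RTS pulse in the loop above advances one entry down
 * baud_rates[], so selecting e.g. 19200 takes three pulses and an unknown
 * speed falls through to the trailing 9600 safety entry.
 */
#if 0	/* example only, never compiled */
static int litelink_pulses_needed(unsigned speed)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(baud_rates); i++)
		if (baud_rates[i] == speed)
			return i;	/* 115200 -> 0, ..., 9600 -> 4 */
	return ARRAY_SIZE(baud_rates) - 1;	/* unknown: fall back to 9600 */
}
#endif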
150
151/*
152 * Function litelink_reset (task)
153 *
154 * Reset the Litelink type dongle.
155 *
156 */
157static int litelink_reset(struct sir_dev *dev)
158{
159 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
160
161 /* probably the power-up can be dropped here, but with only
162 * 15 usec delay it's not worth the risk unless somebody with
163 * the hardware confirms it doesn't break anything...
164 */
165
166 /* Power on dongle */
167 sirdev_set_dtr_rts(dev, TRUE, TRUE);
168
169 /* Sleep a minimum of 15 us */
170 udelay(MIN_DELAY);
171
172 /* Clear RTS to reset dongle */
173 sirdev_set_dtr_rts(dev, TRUE, FALSE);
174
175 /* Sleep a minimum of 15 us */
176 udelay(MIN_DELAY);
177
178 /* Go back to normal mode */
179 sirdev_set_dtr_rts(dev, TRUE, TRUE);
180
181 /* Sleep a minimum of 15 us */
182 udelay(MIN_DELAY);
183
184 /* This dongle's speed defaults to 115200 bps */
185 dev->speed = 115200;
186
187 return 0;
188}
189
190MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
191MODULE_DESCRIPTION("Parallax Litelink dongle driver");
192MODULE_LICENSE("GPL");
193MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */
194
195/*
196 * Function init_module (void)
197 *
198 * Initialize Litelink module
199 *
200 */
201module_init(litelink_sir_init);
202
203/*
204 * Function cleanup_module (void)
205 *
206 * Cleanup Litelink module
207 *
208 */
209module_exit(litelink_sir_cleanup);
diff --git a/drivers/net/irda/litelink.c b/drivers/net/irda/litelink.c
new file mode 100644
index 000000000000..7db11431d0f4
--- /dev/null
+++ b/drivers/net/irda/litelink.c
@@ -0,0 +1,179 @@
1/*********************************************************************
2 *
3 * Filename: litelink.c
4 * Version: 1.1
5 * Description: Driver for the Parallax LiteLink dongle
6 * Status: Stable
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Fri May 7 12:50:33 1999
9 * Modified at: Fri Dec 17 09:14:23 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999 Dag Brattli, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
27 * MA 02111-1307 USA
28 *
29 ********************************************************************/
30
31#include <linux/module.h>
32#include <linux/delay.h>
33#include <linux/tty.h>
34#include <linux/init.h>
35
36#include <net/irda/irda.h>
37#include <net/irda/irda_device.h>
38
39#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
40#define MAX_DELAY 10000 /* 1 ms */
41
42static void litelink_open(dongle_t *self, struct qos_info *qos);
43static void litelink_close(dongle_t *self);
44static int litelink_change_speed(struct irda_task *task);
45static int litelink_reset(struct irda_task *task);
46
47/* These are the baudrates supported */
48static __u32 baud_rates[] = { 115200, 57600, 38400, 19200, 9600 };
49
50static struct dongle_reg dongle = {
51 .type = IRDA_LITELINK_DONGLE,
52 .open = litelink_open,
53 .close = litelink_close,
54 .reset = litelink_reset,
55 .change_speed = litelink_change_speed,
56 .owner = THIS_MODULE,
57};
58
59static int __init litelink_init(void)
60{
61 return irda_device_register_dongle(&dongle);
62}
63
64static void __exit litelink_cleanup(void)
65{
66 irda_device_unregister_dongle(&dongle);
67}
68
69static void litelink_open(dongle_t *self, struct qos_info *qos)
70{
71 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
72 qos->min_turn_time.bits = 0x7f; /* Needs 0.01 ms */
73}
74
75static void litelink_close(dongle_t *self)
76{
77 /* Power off dongle */
78 self->set_dtr_rts(self->dev, FALSE, FALSE);
79}
80
81/*
82 * Function litelink_change_speed (task)
83 *
84 * Change speed of the Litelink dongle. To cycle through the available
85 * baud rates, pulse RTS low for a few ms.
86 */
87static int litelink_change_speed(struct irda_task *task)
88{
89 dongle_t *self = (dongle_t *) task->instance;
90 __u32 speed = (__u32) task->param;
91 int i;
92
93 /* Clear RTS to reset dongle */
94 self->set_dtr_rts(self->dev, TRUE, FALSE);
95
96 /* Sleep a minimum of 15 us */
97 udelay(MIN_DELAY);
98
99 /* Go back to normal mode */
100 self->set_dtr_rts(self->dev, TRUE, TRUE);
101
102 /* Sleep a minimum of 15 us */
103 udelay(MIN_DELAY);
104
105 /* Cycle through available baudrates until we reach the correct one */
106 for (i=0; i<5 && baud_rates[i] != speed; i++) {
107 /* Set DTR, clear RTS */
108 self->set_dtr_rts(self->dev, FALSE, TRUE);
109
110 /* Sleep a minimum of 15 us */
111 udelay(MIN_DELAY);
112
113 /* Set DTR, Set RTS */
114 self->set_dtr_rts(self->dev, TRUE, TRUE);
115
116 /* Sleep a minimum of 15 us */
117 udelay(MIN_DELAY);
118 }
119 irda_task_next_state(task, IRDA_TASK_DONE);
120
121 return 0;
122}
123
124/*
125 * Function litelink_reset (task)
126 *
127 * Reset the Litelink type dongle.
128 *
129 */
130static int litelink_reset(struct irda_task *task)
131{
132 dongle_t *self = (dongle_t *) task->instance;
133
134 /* Power on dongle */
135 self->set_dtr_rts(self->dev, TRUE, TRUE);
136
137 /* Sleep a minimum of 15 us */
138 udelay(MIN_DELAY);
139
140 /* Clear RTS to reset dongle */
141 self->set_dtr_rts(self->dev, TRUE, FALSE);
142
143 /* Sleep a minimum of 15 us */
144 udelay(MIN_DELAY);
145
146 /* Go back to normal mode */
147 self->set_dtr_rts(self->dev, TRUE, TRUE);
148
149 /* Sleep a minimum of 15 us */
150 udelay(MIN_DELAY);
151
152 /* This dongle's speed defaults to 115200 bps */
153 self->speed = 115200;
154
155 irda_task_next_state(task, IRDA_TASK_DONE);
156
157 return 0;
158}
159
160MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
161MODULE_DESCRIPTION("Parallax Litelink dongle driver");
162MODULE_LICENSE("GPL");
163MODULE_ALIAS("irda-dongle-5"); /* IRDA_LITELINK_DONGLE */
164
165/*
166 * Function init_module (void)
167 *
168 * Initialize Litelink module
169 *
170 */
171module_init(litelink_init);
172
173/*
174 * Function cleanup_module (void)
175 *
176 * Cleanup Litelink module
177 *
178 */
179module_exit(litelink_cleanup);
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
new file mode 100644
index 000000000000..ebed168b7da6
--- /dev/null
+++ b/drivers/net/irda/ma600-sir.c
@@ -0,0 +1,264 @@
1/*********************************************************************
2 *
3 * Filename: ma600.c
4 * Version: 0.1
5 * Description: Implementation of the MA600 dongle
6 * Status: Experimental.
7 * Author: Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95
8 * Created at: Sat Jun 10 20:02:35 2000
9 * Modified at: Sat Aug 16 09:34:13 2003
10 * Modified by: Martin Diehl <mad@mdiehl.de> (modified for new sir_dev)
11 *
12 * Note: many thanks to Mr. Maru Wang <maru@mobileaction.com.tw> for providing
13 * information on the MA600 dongle
14 *
15 * Copyright (c) 2000 Leung, All Rights Reserved.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
30 * MA 02111-1307 USA
31 *
32 ********************************************************************/
33
34#include <linux/module.h>
35#include <linux/delay.h>
36#include <linux/init.h>
37#include <linux/sched.h>
38
39#include <net/irda/irda.h>
40
41#include "sir-dev.h"
42
43static int ma600_open(struct sir_dev *);
44static int ma600_close(struct sir_dev *);
45static int ma600_change_speed(struct sir_dev *, unsigned);
46static int ma600_reset(struct sir_dev *);
47
48/* control byte for MA600 */
49#define MA600_9600 0x00
50#define MA600_19200 0x01
51#define MA600_38400 0x02
52#define MA600_57600 0x03
53#define MA600_115200 0x04
54#define MA600_DEV_ID1 0x05
55#define MA600_DEV_ID2 0x06
56#define MA600_2400 0x08
57
58static struct dongle_driver ma600 = {
59 .owner = THIS_MODULE,
60 .driver_name = "MA600",
61 .type = IRDA_MA600_DONGLE,
62 .open = ma600_open,
63 .close = ma600_close,
64 .reset = ma600_reset,
65 .set_speed = ma600_change_speed,
66};
67
68
69static int __init ma600_sir_init(void)
70{
71 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
72 return irda_register_dongle(&ma600);
73}
74
75static void __exit ma600_sir_cleanup(void)
76{
77 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
78 irda_unregister_dongle(&ma600);
79}
80
81/*
82 Power on:
83 (0) Clear RTS and DTR for 1 second
84 (1) Set RTS and DTR for 1 second
85 (2) 9600 bps now
86 Note: assume RTS, DTR are clear before
87*/
88static int ma600_open(struct sir_dev *dev)
89{
90 struct qos_info *qos = &dev->qos;
91
92 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
93
94 sirdev_set_dtr_rts(dev, TRUE, TRUE);
95
96 /* Explicitly set the speeds we can accept */
97 qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400
98 |IR_57600|IR_115200;
99 /* Hm, 0x01 means 10ms - for >= 1ms we would need 0x07 */
100 qos->min_turn_time.bits = 0x01; /* Needs at least 1 ms */
101 irda_qos_bits_to_value(qos);
102
103 /* irda thread waits 50 msec for power settling */
104
105 return 0;
106}
107
108static int ma600_close(struct sir_dev *dev)
109{
110 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
111
112 /* Power off dongle */
113 sirdev_set_dtr_rts(dev, FALSE, FALSE);
114
115 return 0;
116}
117
118static __u8 get_control_byte(__u32 speed)
119{
120 __u8 byte;
121
122 switch (speed) {
123 default:
124 case 115200:
125 byte = MA600_115200;
126 break;
127 case 57600:
128 byte = MA600_57600;
129 break;
130 case 38400:
131 byte = MA600_38400;
132 break;
133 case 19200:
134 byte = MA600_19200;
135 break;
136 case 9600:
137 byte = MA600_9600;
138 break;
139 case 2400:
140 byte = MA600_2400;
141 break;
142 }
143
144 return byte;
145}
146
147/*
148 * Function ma600_change_speed (dev, speed)
149 *
150 * Set the speed for the MA600 type dongle.
151 *
152 * The dongle has already been reset to a known state (dongle default).
153 * The new speed is then selected by writing a control byte (see below).
154 */
155
156/*
157 * Function ma600_change_speed (dev, speed)
158 *
159 * Set the speed for the MA600 type dongle.
160 *
161 * Algorithm
162 * 1. Reset (already done by irda thread state machine)
163 * 2. clear RTS, set DTR and wait for 1ms
164 * 3. send Control Byte to the MA600 through TXD to set new baud rate
165 * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
166 * it takes about 10 msec)
167 * 4. set RTS, set DTR (return to NORMAL Operation)
168 * 5. wait at least 10 ms, new setting (baud rate, etc) takes effect here
169 * after
170 */
171
172/* total delays are only about 20ms - let's just sleep for now to
173 * avoid the state machine complexity before we get things working
174 */
175
176static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
177{
178 u8 byte;
179
180 IRDA_DEBUG(2, "%s(), speed=%d (was %d)\n", __FUNCTION__,
181 speed, dev->speed);
182
183 /* dongle already reset, dongle and port at default speed (9600) */
184
185 /* Set RTS low for 1 ms */
186 sirdev_set_dtr_rts(dev, TRUE, FALSE);
187 mdelay(1);
188
189 /* Write control byte */
190 byte = get_control_byte(speed);
191 sirdev_raw_write(dev, &byte, sizeof(byte));
192
193 /* Wait at least 10ms: fake wait_until_sent - 10 bits at 9600 baud*/
194 msleep(15); /* old ma600 uses 15ms */
195
196#if 1
197 /* read-back of the control byte. ma600 is the first dongle driver
198 * which uses this so there might be some unidentified issues.
199 * Disable this in case of problems with readback.
200 */
201
202 sirdev_raw_read(dev, &byte, sizeof(byte));
203 if (byte != get_control_byte(speed)) {
204 IRDA_WARNING("%s(): bad control byte read-back %02x != %02x\n",
205 __FUNCTION__, (unsigned) byte,
206 (unsigned) get_control_byte(speed));
207 return -1;
208 }
209 else
210 IRDA_DEBUG(2, "%s() control byte write read OK\n", __FUNCTION__);
211#endif
212
213 /* Set DTR, Set RTS */
214 sirdev_set_dtr_rts(dev, TRUE, TRUE);
215
216 /* Wait at least 10ms */
217 msleep(10);
218
219 /* dongle is now switched to the new speed */
220 dev->speed = speed;
221
222 return 0;
223}
224
225/*
226 * Function ma600_reset (dev)
227 *
228 * This function resets the ma600 dongle.
229 *
230 * Algorithm:
231 * 0. DTR=0, RTS=1 and wait 10 ms
232 * 1. DTR=1, RTS=1 and wait 10 ms
233 * 2. 9600 bps now
234 */
235
236/* total delays are only about 20ms - let's just sleep for now to
237 * avoid the state machine complexity before we get things working
238 */
239
240int ma600_reset(struct sir_dev *dev)
241{
242 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
243
244 /* Reset the dongle : set DTR low for 10 ms */
245 sirdev_set_dtr_rts(dev, FALSE, TRUE);
246 msleep(10);
247
248 /* Go back to normal mode */
249 sirdev_set_dtr_rts(dev, TRUE, TRUE);
250 msleep(10);
251
252 dev->speed = 9600; /* That's the dongle-default */
253
254 return 0;
255}
256
257MODULE_AUTHOR("Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95");
258MODULE_DESCRIPTION("MA600 dongle driver version 0.1");
259MODULE_LICENSE("GPL");
260MODULE_ALIAS("irda-dongle-11"); /* IRDA_MA600_DONGLE */
261
262module_init(ma600_sir_init);
263module_exit(ma600_sir_cleanup);
264
diff --git a/drivers/net/irda/ma600.c b/drivers/net/irda/ma600.c
new file mode 100644
index 000000000000..f5e6836667fd
--- /dev/null
+++ b/drivers/net/irda/ma600.c
@@ -0,0 +1,354 @@
1/*********************************************************************
2 *
3 * Filename: ma600.c
4 * Version: 0.1
5 * Description: Implementation of the MA600 dongle
6 * Status: Experimental.
7 * Author: Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95
8 * Created at: Sat Jun 10 20:02:35 2000
9 * Modified at:
10 * Modified by:
11 *
12 * Note: many thanks to Mr. Maru Wang <maru@mobileaction.com.tw> for providing
13 * information on the MA600 dongle
14 *
15 * Copyright (c) 2000 Leung, All Rights Reserved.
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
30 * MA 02111-1307 USA
31 *
32 ********************************************************************/
33
34/* define this macro for release version */
35//#define NDEBUG
36
37#include <linux/module.h>
38#include <linux/delay.h>
39#include <linux/tty.h>
40#include <linux/init.h>
41
42#include <net/irda/irda.h>
43#include <net/irda/irda_device.h>
44
45#ifndef NDEBUG
46 #undef IRDA_DEBUG
47 #define IRDA_DEBUG(n, args...) (printk(KERN_DEBUG args))
48
49 #undef ASSERT
50 #define ASSERT(expr, func) \
51 if(!(expr)) { \
52 printk( "Assertion failed! %s,%s,%s,line=%d\n",\
53 #expr,__FILE__,__FUNCTION__,__LINE__); \
54 func}
55#endif
56
57/* convert hex value to ascii hex */
58static const char hexTbl[] = "0123456789ABCDEF";
59
60
61static void ma600_open(dongle_t *self, struct qos_info *qos);
62static void ma600_close(dongle_t *self);
63static int ma600_change_speed(struct irda_task *task);
64static int ma600_reset(struct irda_task *task);
65
66/* control byte for MA600 */
67#define MA600_9600 0x00
68#define MA600_19200 0x01
69#define MA600_38400 0x02
70#define MA600_57600 0x03
71#define MA600_115200 0x04
72#define MA600_DEV_ID1 0x05
73#define MA600_DEV_ID2 0x06
74#define MA600_2400 0x08
75
76static struct dongle_reg dongle = {
77 .type = IRDA_MA600_DONGLE,
78 .open = ma600_open,
79 .close = ma600_close,
80 .reset = ma600_reset,
81 .change_speed = ma600_change_speed,
82 .owner = THIS_MODULE,
83};
84
85static int __init ma600_init(void)
86{
87 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
88 return irda_device_register_dongle(&dongle);
89}
90
91static void __exit ma600_cleanup(void)
92{
93 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
94 irda_device_unregister_dongle(&dongle);
95}
96
97/*
98 Power on:
99 (0) Clear RTS and DTR for 1 second
100 (1) Set RTS and DTR for 1 second
101 (2) 9600 bps now
102 Note: assume RTS, DTR are clear before
103*/
104static void ma600_open(dongle_t *self, struct qos_info *qos)
105{
106 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
107
108 qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400
109 |IR_57600|IR_115200;
110 qos->min_turn_time.bits = 0x01; /* Needs at least 1 ms */
111 irda_qos_bits_to_value(qos);
112
113 //self->set_dtr_rts(self->dev, FALSE, FALSE);
114 // should wait 1 second
115
116 self->set_dtr_rts(self->dev, TRUE, TRUE);
117 // should wait 1 second
118}
119
120static void ma600_close(dongle_t *self)
121{
122 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
123
124 /* Power off dongle */
125 self->set_dtr_rts(self->dev, FALSE, FALSE);
126}
127
128static __u8 get_control_byte(__u32 speed)
129{
130 __u8 byte;
131
132 switch (speed) {
133 default:
134 case 115200:
135 byte = MA600_115200;
136 break;
137 case 57600:
138 byte = MA600_57600;
139 break;
140 case 38400:
141 byte = MA600_38400;
142 break;
143 case 19200:
144 byte = MA600_19200;
145 break;
146 case 9600:
147 byte = MA600_9600;
148 break;
149 case 2400:
150 byte = MA600_2400;
151 break;
152 }
153
154 return byte;
155}
156
157/*
158 * Function ma600_change_speed (dev, state, speed)
159 *
160 * Set the speed for the MA600 type dongle. Warning, this
161 * function must be called with a process context!
162 *
163 * Algorithm
164 * 1. Reset
165 * 2. clear RTS, set DTR and wait for 1ms
166 * 3. send Control Byte to the MA600 through TXD to set new baud rate
167 * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
168 * it takes about 10 msec)
169 * 4. set RTS, set DTR (return to NORMAL Operation)
170 * 5. wait at least 10 ms, new setting (baud rate, etc) takes effect here
171 * after
172 */
173static int ma600_change_speed(struct irda_task *task)
174{
175 dongle_t *self = (dongle_t *) task->instance;
176 __u32 speed = (__u32) task->param;
177 static __u8 byte;
178 __u8 byte_echo;
179 int ret = 0;
180
181 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
182
183 ASSERT(task != NULL, return -1;);
184
185 if (self->speed_task && self->speed_task != task) {
186 IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__);
187 return msecs_to_jiffies(10);
188 } else {
189 self->speed_task = task;
190 }
191
192 switch (task->state) {
193 case IRDA_TASK_INIT:
194 case IRDA_TASK_CHILD_INIT:
195 /*
196 * Need to reset the dongle and go to 9600 bps before
197 * programming
198 */
199 if (irda_task_execute(self, ma600_reset, NULL, task,
200 (void *) speed)) {
201 /* Dongle needs more time to reset */
202 irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
203
204 /* give 1 second to finish */
205 ret = msecs_to_jiffies(1000);
206 } else {
207 irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
208 }
209 break;
210
211 case IRDA_TASK_CHILD_WAIT:
212 IRDA_WARNING("%s(), resetting dongle timed out!\n",
213 __FUNCTION__);
214 ret = -1;
215 break;
216
217 case IRDA_TASK_CHILD_DONE:
218 /* Set DTR, Clear RTS */
219 self->set_dtr_rts(self->dev, TRUE, FALSE);
220
221 ret = msecs_to_jiffies(1); /* Sleep 1 ms */
222 irda_task_next_state(task, IRDA_TASK_WAIT);
223 break;
224
225 case IRDA_TASK_WAIT:
226 speed = (__u32) task->param;
227 byte = get_control_byte(speed);
228
229 /* Write control byte */
230 self->write(self->dev, &byte, sizeof(byte));
231
232 irda_task_next_state(task, IRDA_TASK_WAIT1);
233
234 /* Wait at least 10 ms */
235 ret = msecs_to_jiffies(15);
236 break;
237
238 case IRDA_TASK_WAIT1:
239 /* Read control byte echo */
240 self->read(self->dev, &byte_echo, sizeof(byte_echo));
241
242 if(byte != byte_echo) {
243 /* if control byte != echo, I don't know what to do */
244 printk(KERN_WARNING "%s() control byte written != read!\n", __FUNCTION__);
245 printk(KERN_WARNING "control byte = 0x%c%c\n",
246 hexTbl[(byte>>4)&0x0f], hexTbl[byte&0x0f]);
247 printk(KERN_WARNING "byte echo = 0x%c%c\n",
248 hexTbl[(byte_echo>>4) & 0x0f],
249 hexTbl[byte_echo & 0x0f]);
250 #ifndef NDEBUG
251 } else {
252 IRDA_DEBUG(2, "%s() control byte write read OK\n", __FUNCTION__);
253 #endif
254 }
255
256 /* Set DTR, Set RTS */
257 self->set_dtr_rts(self->dev, TRUE, TRUE);
258
259 irda_task_next_state(task, IRDA_TASK_WAIT2);
260
261 /* Wait at least 10 ms */
262 ret = msecs_to_jiffies(10);
263 break;
264
265 case IRDA_TASK_WAIT2:
266 irda_task_next_state(task, IRDA_TASK_DONE);
267 self->speed_task = NULL;
268 break;
269
270 default:
271 IRDA_ERROR("%s(), unknown state %d\n",
272 __FUNCTION__, task->state);
273 irda_task_next_state(task, IRDA_TASK_DONE);
274 self->speed_task = NULL;
275 ret = -1;
276 break;
277 }
278 return ret;
279}
280
281/*
282 * Function ma600_reset (driver)
283 *
284 * This function resets the ma600 dongle. Warning, this function
285 * must be called with a process context!!
286 *
287 * Algorithm:
288 * 0. DTR=0, RTS=1 and wait 10 ms
289 * 1. DTR=1, RTS=1 and wait 10 ms
290 * 2. 9600 bps now
291 */
292int ma600_reset(struct irda_task *task)
293{
294 dongle_t *self = (dongle_t *) task->instance;
295 int ret = 0;
296
297 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
298
299 ASSERT(task != NULL, return -1;);
300
301 if (self->reset_task && self->reset_task != task) {
302 IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__);
303 return msecs_to_jiffies(10);
304 } else
305 self->reset_task = task;
306
307 switch (task->state) {
308 case IRDA_TASK_INIT:
309 /* Clear DTR and Set RTS */
310 self->set_dtr_rts(self->dev, FALSE, TRUE);
311 irda_task_next_state(task, IRDA_TASK_WAIT1);
312 ret = msecs_to_jiffies(10); /* Sleep 10 ms */
313 break;
314 case IRDA_TASK_WAIT1:
315 /* Set DTR and RTS */
316 self->set_dtr_rts(self->dev, TRUE, TRUE);
317 irda_task_next_state(task, IRDA_TASK_WAIT2);
318 ret = msecs_to_jiffies(10); /* Sleep 10 ms */
319 break;
320 case IRDA_TASK_WAIT2:
321 irda_task_next_state(task, IRDA_TASK_DONE);
322 self->reset_task = NULL;
323 break;
324 default:
325 IRDA_ERROR("%s(), unknown state %d\n",
326 __FUNCTION__, task->state);
327 irda_task_next_state(task, IRDA_TASK_DONE);
328 self->reset_task = NULL;
329 ret = -1;
330 }
331 return ret;
332}
333
334MODULE_AUTHOR("Leung <95Etwl@alumni.ee.ust.hk> http://www.engsvr.ust/~eetwl95");
335MODULE_DESCRIPTION("MA600 dongle driver version 0.1");
336MODULE_LICENSE("GPL");
337MODULE_ALIAS("irda-dongle-11"); /* IRDA_MA600_DONGLE */
338
339/*
340 * Function init_module (void)
341 *
342 * Initialize MA600 module
343 *
344 */
345module_init(ma600_init);
346
347/*
348 * Function cleanup_module (void)
349 *
350 * Cleanup MA600 module
351 *
352 */
353module_exit(ma600_cleanup);
354
diff --git a/drivers/net/irda/mcp2120-sir.c b/drivers/net/irda/mcp2120-sir.c
new file mode 100644
index 000000000000..67bd016e4df8
--- /dev/null
+++ b/drivers/net/irda/mcp2120-sir.c
@@ -0,0 +1,230 @@
1/*********************************************************************
2 *
3 *
4 * Filename: mcp2120.c
5 * Version: 1.0
6 * Description: Implementation for the MCP2120 (Microchip)
7 * Status: Experimental.
8 * Author: Felix Tang (tangf@eyetap.org)
9 * Created at: Sun Mar 31 19:32:12 EST 2002
10 * Based on code by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 2002 Felix Tang, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 ********************************************************************/
20
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24
25#include <net/irda/irda.h>
26
27#include "sir-dev.h"
28
29static int mcp2120_reset(struct sir_dev *dev);
30static int mcp2120_open(struct sir_dev *dev);
31static int mcp2120_close(struct sir_dev *dev);
32static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed);
33
34#define MCP2120_9600 0x87
35#define MCP2120_19200 0x8B
36#define MCP2120_38400 0x85
37#define MCP2120_57600 0x83
38#define MCP2120_115200 0x81
39
40#define MCP2120_COMMIT 0x11
41
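/* Worked example, inferred from the code below rather than quoted from a
 * datasheet: with DTR asserted the MCP2120 sits in command mode, and the
 * host writes the rate byte followed by the commit byte - e.g. 0x81 0x11
 * selects 115200 - before dropping DTR to return to pass-through operation.
 */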
42static struct dongle_driver mcp2120 = {
43 .owner = THIS_MODULE,
44 .driver_name = "Microchip MCP2120",
45 .type = IRDA_MCP2120_DONGLE,
46 .open = mcp2120_open,
47 .close = mcp2120_close,
48 .reset = mcp2120_reset,
49 .set_speed = mcp2120_change_speed,
50};
51
52static int __init mcp2120_sir_init(void)
53{
54 return irda_register_dongle(&mcp2120);
55}
56
57static void __exit mcp2120_sir_cleanup(void)
58{
59 irda_unregister_dongle(&mcp2120);
60}
61
62static int mcp2120_open(struct sir_dev *dev)
63{
64 struct qos_info *qos = &dev->qos;
65
66 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
67
68 /* it seems no explicit power-on is required here, and the reset switches it on anyway */
69
70 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
71 qos->min_turn_time.bits = 0x01;
72 irda_qos_bits_to_value(qos);
73
74 return 0;
75}
76
77static int mcp2120_close(struct sir_dev *dev)
78{
79 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
80
81 /* Power off dongle */
82 /* reset and inhibit mcp2120 */
83 sirdev_set_dtr_rts(dev, TRUE, TRUE);
84 // sirdev_set_dtr_rts(dev, FALSE, FALSE);
85
86 return 0;
87}
88
89/*
90 * Function mcp2120_change_speed (dev, speed)
91 *
92 * Set the speed for the MCP2120.
93 *
94 */
95
96#define MCP2120_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED+1)
97
98static int mcp2120_change_speed(struct sir_dev *dev, unsigned speed)
99{
100 unsigned state = dev->fsm.substate;
101 unsigned delay = 0;
102 u8 control[2];
103 static int ret = 0;
104
105 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
106
107 switch (state) {
108 case SIRDEV_STATE_DONGLE_SPEED:
109 /* Set DTR to enter command mode */
110 sirdev_set_dtr_rts(dev, TRUE, FALSE);
111 udelay(500);
112
113 ret = 0;
114 switch (speed) {
115 default:
116 speed = 9600;
117 ret = -EINVAL;
118 /* fall through */
119 case 9600:
120 control[0] = MCP2120_9600;
121 //printk("mcp2120 9600\n");
122 break;
123 case 19200:
124 control[0] = MCP2120_19200;
125 //printk("mcp2120 19200\n");
126 break;
127 case 38400:
128 control[0] = MCP2120_38400;
129 //printk("mcp2120 38400\n");
130 break;
131 case 57600:
132 control[0] = MCP2120_57600;
133 //printk("mcp2120 57600\n");
134 break;
135 case 115200:
136 control[0] = MCP2120_115200;
137 //printk("mcp2120 115200\n");
138 break;
139 }
140 control[1] = MCP2120_COMMIT;
141
142 /* Write control bytes */
143 sirdev_raw_write(dev, control, 2);
144 dev->speed = speed;
145
146 state = MCP2120_STATE_WAIT_SPEED;
147 delay = 100;
148 //printk("mcp2120_change_speed: dongle_speed\n");
149 break;
150
151 case MCP2120_STATE_WAIT_SPEED:
152 /* Go back to normal mode */
153 sirdev_set_dtr_rts(dev, FALSE, FALSE);
154 //printk("mcp2120_change_speed: mcp_wait\n");
155 break;
156
157 default:
158 IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state);
159 ret = -EINVAL;
160 break;
161 }
162 dev->fsm.substate = state;
163 return (delay > 0) ? delay : ret;
164}
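/* Illustrative sketch of the calling convention assumed by the function
 * above (inferred from this file, not from separate documentation): a
 * dongle speed/reset callback is re-entered by the irda thread with
 * dev->fsm.substate holding whatever it stored last time, and it returns
 * either a delay in msec before the next step or a negative errno.
 */
#if 0	/* example only, never compiled */
static int example_dongle_step(struct sir_dev *dev, unsigned speed)
{
	unsigned state = dev->fsm.substate;
	unsigned delay = 0;
	int ret = 0;

	switch (state) {
	case SIRDEV_STATE_DONGLE_SPEED:
		/* program the hardware, then ask to be called again */
		state = SIRDEV_STATE_DONGLE_SPEED + 1;
		delay = 100;		/* msec until the next substate */
		break;
	default:
		ret = -EINVAL;
		break;
	}
	dev->fsm.substate = state;
	return (delay > 0) ? delay : ret;
}
#endif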
165
166/*
167 * Function mcp2120_reset (driver)
168 *
169 * This function resets the mcp2120 dongle.
170 *
171 * Info: -set RTS to reset mcp2120
172 * -set DTR to set mcp2120 software command mode
173 * -mcp2120 defaults to 9600 baud after reset
174 *
175 * Algorithm:
176 * 0. Set RTS to reset mcp2120.
177 * 1. Clear RTS and wait for device reset timer of 30 ms (max).
178 *
179 */
180
181#define MCP2120_STATE_WAIT1_RESET (SIRDEV_STATE_DONGLE_RESET+1)
182#define MCP2120_STATE_WAIT2_RESET (SIRDEV_STATE_DONGLE_RESET+2)
183
184static int mcp2120_reset(struct sir_dev *dev)
185{
186 unsigned state = dev->fsm.substate;
187 unsigned delay = 0;
188 int ret = 0;
189
190 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
191
192 switch (state) {
193 case SIRDEV_STATE_DONGLE_RESET:
194 //printk("mcp2120_reset: dongle_reset\n");
195 /* Reset dongle by setting RTS*/
196 sirdev_set_dtr_rts(dev, TRUE, TRUE);
197 state = MCP2120_STATE_WAIT1_RESET;
198 delay = 50;
199 break;
200
201 case MCP2120_STATE_WAIT1_RESET:
202 //printk("mcp2120_reset: mcp2120_wait1\n");
203 /* clear RTS and wait for at least 30 ms. */
204 sirdev_set_dtr_rts(dev, FALSE, FALSE);
205 state = MCP2120_STATE_WAIT2_RESET;
206 delay = 50;
207 break;
208
209 case MCP2120_STATE_WAIT2_RESET:
210 //printk("mcp2120_reset mcp2120_wait2\n");
211 /* Go back to normal mode */
212 sirdev_set_dtr_rts(dev, FALSE, FALSE);
213 break;
214
215 default:
216 IRDA_ERROR("%s(), undefined state %d\n", __FUNCTION__, state);
217 ret = -EINVAL;
218 break;
219 }
220 dev->fsm.substate = state;
221 return (delay > 0) ? delay : ret;
222}
223
224MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
225MODULE_DESCRIPTION("Microchip MCP2120");
226MODULE_LICENSE("GPL");
227MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
228
229module_init(mcp2120_sir_init);
230module_exit(mcp2120_sir_cleanup);
diff --git a/drivers/net/irda/mcp2120.c b/drivers/net/irda/mcp2120.c
new file mode 100644
index 000000000000..5e6199eeef4f
--- /dev/null
+++ b/drivers/net/irda/mcp2120.c
@@ -0,0 +1,240 @@
1/*********************************************************************
2 *
3 *
4 * Filename: mcp2120.c
5 * Version: 1.0
6 * Description: Implementation for the MCP2120 (Microchip)
7 * Status: Experimental.
8 * Author: Felix Tang (tangf@eyetap.org)
9 * Created at: Sun Mar 31 19:32:12 EST 2002
10 * Based on code by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 2002 Felix Tang, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 ********************************************************************/
20
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/tty.h>
24#include <linux/init.h>
25
26#include <net/irda/irda.h>
27#include <net/irda/irda_device.h>
28
29static int mcp2120_reset(struct irda_task *task);
30static void mcp2120_open(dongle_t *self, struct qos_info *qos);
31static void mcp2120_close(dongle_t *self);
32static int mcp2120_change_speed(struct irda_task *task);
33
34#define MCP2120_9600 0x87
35#define MCP2120_19200 0x8B
36#define MCP2120_38400 0x85
37#define MCP2120_57600 0x83
38#define MCP2120_115200 0x81
39
40#define MCP2120_COMMIT 0x11
41
42static struct dongle_reg dongle = {
43 .type = IRDA_MCP2120_DONGLE,
44 .open = mcp2120_open,
45 .close = mcp2120_close,
46 .reset = mcp2120_reset,
47 .change_speed = mcp2120_change_speed,
48 .owner = THIS_MODULE,
49};
50
51static int __init mcp2120_init(void)
52{
53 return irda_device_register_dongle(&dongle);
54}
55
56static void __exit mcp2120_cleanup(void)
57{
58 irda_device_unregister_dongle(&dongle);
59}
60
61static void mcp2120_open(dongle_t *self, struct qos_info *qos)
62{
63 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
64 qos->min_turn_time.bits = 0x01;
65}
66
67static void mcp2120_close(dongle_t *self)
68{
69 /* Power off dongle */
70 /* reset and inhibit mcp2120 */
71 self->set_dtr_rts(self->dev, TRUE, TRUE);
72 //self->set_dtr_rts(self->dev, FALSE, FALSE);
73}
74
75/*
76 * Function mcp2120_change_speed (dev, speed)
77 *
78 * Set the speed for the MCP2120.
79 *
80 */
81static int mcp2120_change_speed(struct irda_task *task)
82{
83 dongle_t *self = (dongle_t *) task->instance;
84 __u32 speed = (__u32) task->param;
85 __u8 control[2];
86 int ret = 0;
87
88 self->speed_task = task;
89
90 switch (task->state) {
91 case IRDA_TASK_INIT:
92 /* Need to reset the dongle and go to 9600 bps before
93 programming */
94 //printk("Dmcp2120_change_speed irda_task_init\n");
95 if (irda_task_execute(self, mcp2120_reset, NULL, task,
96 (void *) speed))
97 {
98 /* Dongle needs more time to reset */
99 irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
100
101 /* Give reset 1 sec to finish */
102 ret = msecs_to_jiffies(1000);
103 }
104 break;
105 case IRDA_TASK_CHILD_WAIT:
106 IRDA_WARNING("%s(), resetting dongle timed out!\n",
107 __FUNCTION__);
108 ret = -1;
109 break;
110 case IRDA_TASK_CHILD_DONE:
111 /* Set DTR to enter command mode */
112 self->set_dtr_rts(self->dev, TRUE, FALSE);
113 udelay(500);
114
115 switch (speed) {
116 case 9600:
117 default:
118 control[0] = MCP2120_9600;
119 //printk("mcp2120 9600\n");
120 break;
121 case 19200:
122 control[0] = MCP2120_19200;
123 //printk("mcp2120 19200\n");
124 break;
125 case 38400:
126 control[0] = MCP2120_38400;
127 //printk("mcp2120 38400\n");
128 break;
129 case 57600:
130 control[0] = MCP2120_57600;
131 //printk("mcp2120 57600\n");
132 break;
133 case 115200:
134 control[0] = MCP2120_115200;
135 //printk("mcp2120 115200\n");
136 break;
137 }
138 control[1] = MCP2120_COMMIT;
139
140 /* Write control bytes */
141 self->write(self->dev, control, 2);
142
143 irda_task_next_state(task, IRDA_TASK_WAIT);
144 ret = msecs_to_jiffies(100);
145 //printk("mcp2120_change_speed irda_child_done\n");
146 break;
147 case IRDA_TASK_WAIT:
148 /* Go back to normal mode */
149 self->set_dtr_rts(self->dev, FALSE, FALSE);
150 irda_task_next_state(task, IRDA_TASK_DONE);
151 self->speed_task = NULL;
152 //printk("mcp2120_change_speed irda_task_wait\n");
153 break;
154 default:
155 IRDA_ERROR("%s(), unknown state %d\n",
156 __FUNCTION__, task->state);
157 irda_task_next_state(task, IRDA_TASK_DONE);
158 self->speed_task = NULL;
159 ret = -1;
160 break;
161 }
162 return ret;
163}
164
165/*
166 * Function mcp2120_reset (driver)
167 *
168 * This function resets the mcp2120 dongle.
169 *
170 * Info: -set RTS to reset mcp2120
171 * -set DTR to set mcp2120 software command mode
172 * -mcp2120 defaults to 9600 baud after reset
173 *
174 * Algorithm:
175 * 0. Set RTS to reset mcp2120.
176 * 1. Clear RTS and wait for device reset timer of 30 ms (max).
177 *
178 */
179
180
181static int mcp2120_reset(struct irda_task *task)
182{
183 dongle_t *self = (dongle_t *) task->instance;
184 int ret = 0;
185
186 self->reset_task = task;
187
188 switch (task->state) {
189 case IRDA_TASK_INIT:
190 //printk("mcp2120_reset irda_task_init\n");
191 /* Reset dongle by setting RTS*/
192 self->set_dtr_rts(self->dev, TRUE, TRUE);
193 irda_task_next_state(task, IRDA_TASK_WAIT1);
194 ret = msecs_to_jiffies(50);
195 break;
196 case IRDA_TASK_WAIT1:
197 //printk("mcp2120_reset irda_task_wait1\n");
198 /* clear RTS and wait for at least 30 ms. */
199 self->set_dtr_rts(self->dev, FALSE, FALSE);
200 irda_task_next_state(task, IRDA_TASK_WAIT2);
201 ret = msecs_to_jiffies(50);
202 break;
203 case IRDA_TASK_WAIT2:
204 //printk("mcp2120_reset irda_task_wait2\n");
205 /* Go back to normal mode */
206 self->set_dtr_rts(self->dev, FALSE, FALSE);
207 irda_task_next_state(task, IRDA_TASK_DONE);
208 self->reset_task = NULL;
209 break;
210 default:
211 IRDA_ERROR("%s(), unknown state %d\n",
212 __FUNCTION__, task->state);
213 irda_task_next_state(task, IRDA_TASK_DONE);
214 self->reset_task = NULL;
215 ret = -1;
216 break;
217 }
218 return ret;
219}
220
221MODULE_AUTHOR("Felix Tang <tangf@eyetap.org>");
222MODULE_DESCRIPTION("Microchip MCP2120");
223MODULE_LICENSE("GPL");
224MODULE_ALIAS("irda-dongle-9"); /* IRDA_MCP2120_DONGLE */
225
226/*
227 * Function init_module (void)
228 *
229 * Initialize MCP2120 module
230 *
231 */
232module_init(mcp2120_init);
233
234/*
235 * Function cleanup_module (void)
236 *
237 * Cleanup MCP2120 module
238 *
239 */
240module_exit(mcp2120_cleanup);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
new file mode 100644
index 000000000000..805714ec9a8a
--- /dev/null
+++ b/drivers/net/irda/nsc-ircc.c
@@ -0,0 +1,2222 @@
1/*********************************************************************
2 *
3 * Filename: nsc-ircc.c
4 * Version: 1.0
5 * Description: Driver for the NSC PC'108 and PC'338 IrDA chipsets
6 * Status: Stable.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Sat Nov 7 21:43:15 1998
9 * Modified at: Wed Mar 1 11:29:34 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
14 * Copyright (c) 1998 Actisys Corp., www.actisys.com
15 * All Rights Reserved
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
21 *
22 * Neither Dag Brattli nor University of Tromsø admit liability nor
23 * provide warranty for any of this software. This material is
24 * provided "AS-IS" and at no charge.
25 *
26 * Notice that all functions that need to access the chip in _any_
27 * way must save the BSR register on entry and restore it on exit.
28 * It is _very_ important to follow this policy!
29 *
30 * __u8 bank;
31 *
32 * bank = inb(iobase+BSR);
33 *
34 * do_your_stuff_here();
35 *
36 * outb(bank, iobase+BSR);
37 *
38 * If you find bugs in this file, it's very likely that the same bug
39 * will also be in w83977af_ir.c since the implementations are quite
40 * similar.
41 *
42 ********************************************************************/
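/*
 * Editor's note -- illustrative sketch, not used by this driver.  The
 * BSR save/restore policy described above could be wrapped in a pair
 * of trivial helpers like the hypothetical ones below (they assume the
 * inb()/outb() and BSR definitions pulled in further down); the driver
 * itself open-codes exactly this pattern in every function that
 * touches the chip.
 *
 *	static inline __u8 nsc_ircc_save_bank(int iobase)
 *	{
 *		return inb(iobase + BSR);	// remember the selected bank
 *	}
 *
 *	static inline void nsc_ircc_restore_bank(int iobase, __u8 bank)
 *	{
 *		outb(bank, iobase + BSR);	// put the saved bank back
 *	}
 */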
43
44#include <linux/module.h>
45
46#include <linux/kernel.h>
47#include <linux/types.h>
48#include <linux/skbuff.h>
49#include <linux/netdevice.h>
50#include <linux/ioport.h>
51#include <linux/delay.h>
52#include <linux/slab.h>
53#include <linux/init.h>
54#include <linux/rtnetlink.h>
55#include <linux/dma-mapping.h>
56
57#include <asm/io.h>
58#include <asm/dma.h>
59#include <asm/byteorder.h>
60
61#include <linux/pm.h>
62
63#include <net/irda/wrapper.h>
64#include <net/irda/irda.h>
65#include <net/irda/irda_device.h>
66
67#include "nsc-ircc.h"
68
69#define CHIP_IO_EXTENT 8
70#define BROKEN_DONGLE_ID
71
72static char *driver_name = "nsc-ircc";
73
74/* Module parameters */
75static int qos_mtt_bits = 0x07; /* 1 ms or more */
76static int dongle_id;
77
78/* Use BIOS settings by default, but the user may supply module parameters */
79static unsigned int io[] = { ~0, ~0, ~0, ~0 };
80static unsigned int irq[] = { 0, 0, 0, 0, 0 };
81static unsigned int dma[] = { 0, 0, 0, 0, 0 };
82
83static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info);
84static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info);
85static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info);
86static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info);
87static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info);
88static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info);
89
90/* These are the known NSC chips */
91static nsc_chip_t chips[] = {
92/* Name, {cfg registers}, chip id index reg, chip id expected value, revision mask */
93 { "PC87108", { 0x150, 0x398, 0xea }, 0x05, 0x10, 0xf0,
94 nsc_ircc_probe_108, nsc_ircc_init_108 },
95 { "PC87338", { 0x398, 0x15c, 0x2e }, 0x08, 0xb0, 0xf8,
96 nsc_ircc_probe_338, nsc_ircc_init_338 },
97 /* Contributed by Steffen Pingel - IBM X40 */
98 { "PC8738x", { 0x164e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff,
99 nsc_ircc_probe_39x, nsc_ircc_init_39x },
100 /* Contributed by Jan Frey - IBM A30/A31 */
101 { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff,
102 nsc_ircc_probe_39x, nsc_ircc_init_39x },
103 { NULL }
104};
105
106/* Max 4 instances for now */
107static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
108
109static char *dongle_types[] = {
110 "Differential serial interface",
111 "Differential serial interface",
112 "Reserved",
113 "Reserved",
114 "Sharp RY5HD01",
115 "Reserved",
116 "Single-ended serial interface",
117 "Consumer-IR only",
118 "HP HSDL-2300, HP HSDL-3600/HSDL-3610",
119 "IBM31T1100 or Temic TFDS6000/TFDS6500",
120 "Reserved",
121 "Reserved",
122 "HP HSDL-1100/HSDL-2100",
123 "HP HSDL-1100/HSDL-2100",
124 "Supports SIR Mode only",
125 "No dongle connected",
126};
127
128/* Some prototypes */
129static int nsc_ircc_open(int i, chipio_t *info);
130static int nsc_ircc_close(struct nsc_ircc_cb *self);
131static int nsc_ircc_setup(chipio_t *info);
132static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
133static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self);
134static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase);
135static int nsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev);
136static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev);
137static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
138static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase);
139static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud);
140static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self);
141static int nsc_ircc_read_dongle_id (int iobase);
142static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id);
143
144static int nsc_ircc_net_open(struct net_device *dev);
145static int nsc_ircc_net_close(struct net_device *dev);
146static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
147static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
148static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
149
150/*
151 * Function nsc_ircc_init ()
152 *
153 * Initialize chip. Just try to find out how many chips we are dealing with
154 * and where they are
155 */
156static int __init nsc_ircc_init(void)
157{
158 chipio_t info;
159 nsc_chip_t *chip;
160 int ret = -ENODEV;
161 int cfg_base;
162 int cfg, id;
163 int reg;
164 int i = 0;
165
166 /* Probe for all the NSC chipsets we know about */
167 for (chip=chips; chip->name ; chip++) {
168 IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__,
169 chip->name);
170
171 /* Try all config registers for this chip */
172 for (cfg=0; cfg<3; cfg++) {
173 cfg_base = chip->cfg[cfg];
174 if (!cfg_base)
175 continue;
176
177 memset(&info, 0, sizeof(chipio_t));
178 info.cfg_base = cfg_base;
179 info.fir_base = io[i];
180 info.dma = dma[i];
181 info.irq = irq[i];
182
183 /* Read index register */
184 reg = inb(cfg_base);
185 if (reg == 0xff) {
186 IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __FUNCTION__, cfg_base);
187 continue;
188 }
189
190 /* Read chip identification register */
191 outb(chip->cid_index, cfg_base);
192 id = inb(cfg_base+1);
193 if ((id & chip->cid_mask) == chip->cid_value) {
194 IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
195 __FUNCTION__, chip->name, id & ~chip->cid_mask);
196 /*
197 * If the user supplies the base address, we
198 * initialize the chip; otherwise we probe the
199 * values set by the BIOS.
200 */
201 if (io[i] < 0x2000) {
202 chip->init(chip, &info);
203 } else
204 chip->probe(chip, &info);
205
206 if (nsc_ircc_open(i, &info) == 0)
207 ret = 0;
208 i++;
209 } else {
210 IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id);
211 }
212 }
213
214 }
215
216 return ret;
217}
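/*
 * Editor's note -- worked example, not driver code.  Taking the
 * PC87338 entry of chips[] above (cid_index 0x08, cid_value 0xb0,
 * cid_mask 0xf8) and a hypothetical identification byte id = 0xb3:
 *
 *	id & cid_mask  = 0xb3 & 0xf8 = 0xb0   -> equals cid_value, so match
 *	id & ~cid_mask = 0xb3 & 0x07 = 0x03   -> printed as revision 3
 *
 * which is exactly the comparison and the "revision=%d" value used by
 * nsc_ircc_init() above.
 */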
218
219/*
220 * Function nsc_ircc_cleanup ()
221 *
222 * Close all configured chips
223 *
224 */
225static void __exit nsc_ircc_cleanup(void)
226{
227 int i;
228
229 pm_unregister_all(nsc_ircc_pmproc);
230
231 for (i=0; i < 4; i++) {
232 if (dev_self[i])
233 nsc_ircc_close(dev_self[i]);
234 }
235}
236
237/*
238 * Function nsc_ircc_open (iobase, irq)
239 *
240 * Open driver instance
241 *
242 */
243static int __init nsc_ircc_open(int i, chipio_t *info)
244{
245 struct net_device *dev;
246 struct nsc_ircc_cb *self;
247 struct pm_dev *pmdev;
248 void *ret;
249 int err;
250
251 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
252
253 IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name,
254 info->cfg_base);
255
256 if ((nsc_ircc_setup(info)) == -1)
257 return -1;
258
259 IRDA_MESSAGE("%s, driver loaded (Dag Brattli)\n", driver_name);
260
261 dev = alloc_irdadev(sizeof(struct nsc_ircc_cb));
262 if (dev == NULL) {
263 IRDA_ERROR("%s(), can't allocate memory for "
264 "control block!\n", __FUNCTION__);
265 return -ENOMEM;
266 }
267
268 self = dev->priv;
269 self->netdev = dev;
270 spin_lock_init(&self->lock);
271
272 /* Need to store self somewhere */
273 dev_self[i] = self;
274 self->index = i;
275
276 /* Initialize IO */
277 self->io.cfg_base = info->cfg_base;
278 self->io.fir_base = info->fir_base;
279 self->io.irq = info->irq;
280 self->io.fir_ext = CHIP_IO_EXTENT;
281 self->io.dma = info->dma;
282 self->io.fifo_size = 32;
283
284 /* Reserve the ioports that we need */
285 ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
286 if (!ret) {
287 IRDA_WARNING("%s(), can't get iobase of 0x%03x\n",
288 __FUNCTION__, self->io.fir_base);
289 err = -ENODEV;
290 goto out1;
291 }
292
293 /* Initialize QoS for this device */
294 irda_init_max_qos_capabilies(&self->qos);
295
296 /* The only value we must override is the baudrate */
297 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
298 IR_115200|IR_576000|IR_1152000 |(IR_4000000 << 8);
299
300 self->qos.min_turn_time.bits = qos_mtt_bits;
301 irda_qos_bits_to_value(&self->qos);
302
303 /* Max DMA buffer size needed = (data_size + 6) * window_size + 6 = (2048 + 6) * 7 + 6 = 14384 */
304 self->rx_buff.truesize = 14384;
305 self->tx_buff.truesize = 14384;
306
307 /* Allocate memory if needed */
308 self->rx_buff.head =
309 dma_alloc_coherent(NULL, self->rx_buff.truesize,
310 &self->rx_buff_dma, GFP_KERNEL);
311 if (self->rx_buff.head == NULL) {
312 err = -ENOMEM;
313 goto out2;
314
315 }
316 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
317
318 self->tx_buff.head =
319 dma_alloc_coherent(NULL, self->tx_buff.truesize,
320 &self->tx_buff_dma, GFP_KERNEL);
321 if (self->tx_buff.head == NULL) {
322 err = -ENOMEM;
323 goto out3;
324 }
325 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
326
327 self->rx_buff.in_frame = FALSE;
328 self->rx_buff.state = OUTSIDE_FRAME;
329 self->tx_buff.data = self->tx_buff.head;
330 self->rx_buff.data = self->rx_buff.head;
331
332 /* Reset Tx queue info */
333 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
334 self->tx_fifo.tail = self->tx_buff.head;
335
336 /* Override the network functions we need to use */
337 SET_MODULE_OWNER(dev);
338 dev->hard_start_xmit = nsc_ircc_hard_xmit_sir;
339 dev->open = nsc_ircc_net_open;
340 dev->stop = nsc_ircc_net_close;
341 dev->do_ioctl = nsc_ircc_net_ioctl;
342 dev->get_stats = nsc_ircc_net_get_stats;
343
344 err = register_netdev(dev);
345 if (err) {
346 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
347 goto out4;
348 }
349 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
350
351 /* Check if user has supplied a valid dongle id or not */
352 if ((dongle_id <= 0) ||
353 (dongle_id >= (sizeof(dongle_types) / sizeof(dongle_types[0]))) ) {
354 dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base);
355
356 IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name,
357 dongle_types[dongle_id]);
358 } else {
359 IRDA_MESSAGE("%s, Using dongle: %s\n", driver_name,
360 dongle_types[dongle_id]);
361 }
362
363 self->io.dongle_id = dongle_id;
364 nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id);
365
366 pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, nsc_ircc_pmproc);
367 if (pmdev)
368 pmdev->data = self;
369
370 return 0;
371 out4:
372 dma_free_coherent(NULL, self->tx_buff.truesize,
373 self->tx_buff.head, self->tx_buff_dma);
374 out3:
375 dma_free_coherent(NULL, self->rx_buff.truesize,
376 self->rx_buff.head, self->rx_buff_dma);
377 out2:
378 release_region(self->io.fir_base, self->io.fir_ext);
379 out1:
380 free_netdev(dev);
381 dev_self[i] = NULL;
382 return err;
383}
384
385/*
386 * Function nsc_ircc_close (self)
387 *
388 * Close driver instance
389 *
390 */
391static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
392{
393 int iobase;
394
395 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
396
397 IRDA_ASSERT(self != NULL, return -1;);
398
399 iobase = self->io.fir_base;
400
401 /* Remove netdevice */
402 unregister_netdev(self->netdev);
403
404 /* Release the PORT that this driver is using */
405 IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
406 __FUNCTION__, self->io.fir_base);
407 release_region(self->io.fir_base, self->io.fir_ext);
408
409 if (self->tx_buff.head)
410 dma_free_coherent(NULL, self->tx_buff.truesize,
411 self->tx_buff.head, self->tx_buff_dma);
412
413 if (self->rx_buff.head)
414 dma_free_coherent(NULL, self->rx_buff.truesize,
415 self->rx_buff.head, self->rx_buff_dma);
416
417 dev_self[self->index] = NULL;
418 free_netdev(self->netdev);
419
420 return 0;
421}
422
423/*
424 * Function nsc_ircc_init_108 (iobase, cfg_base, irq, dma)
425 *
426 * Initialize the NSC '108 chip
427 *
428 */
429static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
430{
431 int cfg_base = info->cfg_base;
432 __u8 temp=0;
433
434 outb(2, cfg_base); /* Mode Control Register (MCTL) */
435 outb(0x00, cfg_base+1); /* Disable device */
436
437 /* Base Address and Interrupt Control Register (BAIC) */
438 outb(CFG_108_BAIC, cfg_base);
439 switch (info->fir_base) {
440 case 0x3e8: outb(0x14, cfg_base+1); break;
441 case 0x2e8: outb(0x15, cfg_base+1); break;
442 case 0x3f8: outb(0x16, cfg_base+1); break;
443 case 0x2f8: outb(0x17, cfg_base+1); break;
444 default: IRDA_ERROR("%s(), invalid base_address", __FUNCTION__);
445 }
446
447 /* Control Signal Routing Register (CSRT) */
448 switch (info->irq) {
449 case 3: temp = 0x01; break;
450 case 4: temp = 0x02; break;
451 case 5: temp = 0x03; break;
452 case 7: temp = 0x04; break;
453 case 9: temp = 0x05; break;
454 case 11: temp = 0x06; break;
455 case 15: temp = 0x07; break;
456 default: IRDA_ERROR("%s(), invalid irq", __FUNCTION__);
457 }
458 outb(CFG_108_CSRT, cfg_base);
459
460 switch (info->dma) {
461 case 0: outb(0x08+temp, cfg_base+1); break;
462 case 1: outb(0x10+temp, cfg_base+1); break;
463 case 3: outb(0x18+temp, cfg_base+1); break;
464 default: IRDA_ERROR("%s(), invalid dma", __FUNCTION__);
465 }
466
467 outb(CFG_108_MCTL, cfg_base); /* Mode Control Register (MCTL) */
468 outb(0x03, cfg_base+1); /* Enable device */
469
470 return 0;
471}
472
473/*
474 * Function nsc_ircc_probe_108 (chip, info)
475 *
476 *
477 *
478 */
479static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
480{
481 int cfg_base = info->cfg_base;
482 int reg;
483
484 /* Read address and interrupt control register (BAIC) */
485 outb(CFG_108_BAIC, cfg_base);
486 reg = inb(cfg_base+1);
487
488 switch (reg & 0x03) {
489 case 0:
490 info->fir_base = 0x3e8;
491 break;
492 case 1:
493 info->fir_base = 0x2e8;
494 break;
495 case 2:
496 info->fir_base = 0x3f8;
497 break;
498 case 3:
499 info->fir_base = 0x2f8;
500 break;
501 }
502 info->sir_base = info->fir_base;
503 IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__,
504 info->fir_base);
505
506 /* Read control signals routing register (CSRT) */
507 outb(CFG_108_CSRT, cfg_base);
508 reg = inb(cfg_base+1);
509
510 switch (reg & 0x07) {
511 case 0:
512 info->irq = -1;
513 break;
514 case 1:
515 info->irq = 3;
516 break;
517 case 2:
518 info->irq = 4;
519 break;
520 case 3:
521 info->irq = 5;
522 break;
523 case 4:
524 info->irq = 7;
525 break;
526 case 5:
527 info->irq = 9;
528 break;
529 case 6:
530 info->irq = 11;
531 break;
532 case 7:
533 info->irq = 15;
534 break;
535 }
536 IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq);
537
538 /* Currently we only read Rx DMA but it will also be used for Tx */
539 switch ((reg >> 3) & 0x03) {
540 case 0:
541 info->dma = -1;
542 break;
543 case 1:
544 info->dma = 0;
545 break;
546 case 2:
547 info->dma = 1;
548 break;
549 case 3:
550 info->dma = 3;
551 break;
552 }
553 IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma);
554
555 /* Read mode control register (MCTL) */
556 outb(CFG_108_MCTL, cfg_base);
557 reg = inb(cfg_base+1);
558
559 info->enabled = reg & 0x01;
560 info->suspended = !((reg >> 1) & 0x01);
561
562 return 0;
563}
564
565/*
566 * Function nsc_ircc_init_338 (chip, info)
567 *
568 * Initialize the NSC '338 chip. Remember that the 87338 needs two
569 * consecutive writes to the data registers while CPU interrupts are
570 * disabled. The 97338 does not require this, but it shouldn't do any
571 * harm if we do it anyway.
572 */
573static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info)
574{
575 /* No init yet */
576
577 return 0;
578}
579
580/*
581 * Function nsc_ircc_probe_338 (chip, info)
582 *
583 *
584 *
585 */
586static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info)
587{
588 int cfg_base = info->cfg_base;
589 int reg, com = 0;
590 int pnp;
591
592 /* Read function enable register (FER) */
593 outb(CFG_338_FER, cfg_base);
594 reg = inb(cfg_base+1);
595
596 info->enabled = (reg >> 2) & 0x01;
597
598 /* Check if we are in Legacy or PnP mode */
599 outb(CFG_338_PNP0, cfg_base);
600 reg = inb(cfg_base+1);
601
602 pnp = (reg >> 3) & 0x01;
603 if (pnp) {
604 IRDA_DEBUG(2, "(), Chip is in PnP mode\n");
605 outb(0x46, cfg_base);
606 reg = (inb(cfg_base+1) & 0xfe) << 2;
607
608 outb(0x47, cfg_base);
609 reg |= ((inb(cfg_base+1) & 0xfc) << 8);
610
611 info->fir_base = reg;
612 } else {
613 /* Read function address register (FAR) */
614 outb(CFG_338_FAR, cfg_base);
615 reg = inb(cfg_base+1);
616
617 switch ((reg >> 4) & 0x03) {
618 case 0:
619 info->fir_base = 0x3f8;
620 break;
621 case 1:
622 info->fir_base = 0x2f8;
623 break;
624 case 2:
625 com = 3;
626 break;
627 case 3:
628 com = 4;
629 break;
630 }
631
632 if (com) {
633 switch ((reg >> 6) & 0x03) {
634 case 0:
635 if (com == 3)
636 info->fir_base = 0x3e8;
637 else
638 info->fir_base = 0x2e8;
639 break;
640 case 1:
641 if (com == 3)
642 info->fir_base = 0x338;
643 else
644 info->fir_base = 0x238;
645 break;
646 case 2:
647 if (com == 3)
648 info->fir_base = 0x2e8;
649 else
650 info->fir_base = 0x2e0;
651 break;
652 case 3:
653 if (com == 3)
654 info->fir_base = 0x220;
655 else
656 info->fir_base = 0x228;
657 break;
658 }
659 }
660 }
661 info->sir_base = info->fir_base;
662
663 /* Read PnP register 1 (PNP1) */
664 outb(CFG_338_PNP1, cfg_base);
665 reg = inb(cfg_base+1);
666
667 info->irq = reg >> 4;
668
669 /* Read PnP register 3 (PNP3) */
670 outb(CFG_338_PNP3, cfg_base);
671 reg = inb(cfg_base+1);
672
673 info->dma = (reg & 0x07) - 1;
674
675 /* Read power and test register (PTR) */
676 outb(CFG_338_PTR, cfg_base);
677 reg = inb(cfg_base+1);
678
679 info->suspended = reg & 0x01;
680
681 return 0;
682}
683
684
685/*
686 * Function nsc_ircc_init_39x (chip, info)
687 *
688 * Now that we know it's a '39x (see probe below), we need to
689 * configure it so we can use it.
690 *
691 * The NSC '39x is a Super I/O chip with a "bank" architecture,
692 * the configuration of the different functionality (serial, parallel,
693 * floppy...) are each in a different bank (Logical Device Number).
694 * The base address, irq and dma configuration registers are common
695 * to all functionalities (index 0x30 to 0x7F).
696 * There is only one configuration register specific to the
697 * serial port, CFG_39X_SPC.
698 * JeanII
699 *
700 * Note : this code was written by Jan Frey <janfrey@web.de>
701 */
702static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info)
703{
704 int cfg_base = info->cfg_base;
705 int enabled;
706
707 /* User is sure about his config... accept it. */
708 IRDA_DEBUG(2, "%s(): nsc_ircc_init_39x (user settings): "
709 "io=0x%04x, irq=%d, dma=%d\n",
710 __FUNCTION__, info->fir_base, info->irq, info->dma);
711
712 /* Access bank for SP2 */
713 outb(CFG_39X_LDN, cfg_base);
714 outb(0x02, cfg_base+1);
715
716 /* Configure SP2 */
717
718 /* We want to enable the device if not enabled */
719 outb(CFG_39X_ACT, cfg_base);
720 enabled = inb(cfg_base+1) & 0x01;
721
722 if (!enabled) {
723 /* Enable the device */
724 outb(CFG_39X_SIOCF1, cfg_base);
725 outb(0x01, cfg_base+1);
726 /* May want to update info->enabled. Jean II */
727 }
728
729 /* Enable UART bank switching (bit 7) ; Sets the chip to normal
730 * power mode (wake up from sleep mode) (bit 1) */
731 outb(CFG_39X_SPC, cfg_base);
732 outb(0x82, cfg_base+1);
733
734 return 0;
735}
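/*
 * Editor's note -- illustrative sketch, not used by this driver.  The
 * Super I/O configuration accesses above all follow the usual
 * index/data idiom: write a register index to cfg_base, then read or
 * write the register's value at cfg_base+1.  Wrapped as hypothetical
 * helpers (names are assumptions, not part of the driver):
 *
 *	static inline void superio_cfg_write(int cfg_base, __u8 index, __u8 value)
 *	{
 *		outb(index, cfg_base);		// select config register
 *		outb(value, cfg_base + 1);	// write its value
 *	}
 *
 *	static inline __u8 superio_cfg_read(int cfg_base, __u8 index)
 *	{
 *		outb(index, cfg_base);		// select config register
 *		return inb(cfg_base + 1);	// read its value
 *	}
 */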
736
737/*
738 * Function nsc_ircc_probe_39x (chip, info)
739 *
740 * Test if we really have a '39x chip at the given address
741 *
742 * Note : this code was written by Jan Frey <janfrey@web.de>
743 */
744static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info)
745{
746 int cfg_base = info->cfg_base;
747 int reg1, reg2, irq, irqt, dma1, dma2;
748 int enabled, susp;
749
750 IRDA_DEBUG(2, "%s(), nsc_ircc_probe_39x, base=%d\n",
751 __FUNCTION__, cfg_base);
752
753 /* This function should be executed with irq off to avoid
754 * another driver messing with the Super I/O bank - Jean II */
755
756 /* Access bank for SP2 */
757 outb(CFG_39X_LDN, cfg_base);
758 outb(0x02, cfg_base+1);
759
760 /* Read info about SP2; store in info struct */
761 outb(CFG_39X_BASEH, cfg_base);
762 reg1 = inb(cfg_base+1);
763 outb(CFG_39X_BASEL, cfg_base);
764 reg2 = inb(cfg_base+1);
765 info->fir_base = (reg1 << 8) | reg2;
766
767 outb(CFG_39X_IRQNUM, cfg_base);
768 irq = inb(cfg_base+1);
769 outb(CFG_39X_IRQSEL, cfg_base);
770 irqt = inb(cfg_base+1);
771 info->irq = irq;
772
773 outb(CFG_39X_DMA0, cfg_base);
774 dma1 = inb(cfg_base+1);
775 outb(CFG_39X_DMA1, cfg_base);
776 dma2 = inb(cfg_base+1);
777 info->dma = dma1 -1;
778
779 outb(CFG_39X_ACT, cfg_base);
780 info->enabled = enabled = inb(cfg_base+1) & 0x01;
781
782 outb(CFG_39X_SPC, cfg_base);
783 susp = 1 - ((inb(cfg_base+1) & 0x02) >> 1);
784
785 IRDA_DEBUG(2, "%s(): io=0x%02x%02x, irq=%d (type %d), rxdma=%d, txdma=%d, enabled=%d (suspended=%d)\n", __FUNCTION__, reg1,reg2,irq,irqt,dma1,dma2,enabled,susp);
786
787 /* Configure SP2 */
788
789 /* We want to enable the device if not enabled */
790 outb(CFG_39X_ACT, cfg_base);
791 enabled = inb(cfg_base+1) & 0x01;
792
793 if (!enabled) {
794 /* Enable the device */
795 outb(CFG_39X_SIOCF1, cfg_base);
796 outb(0x01, cfg_base+1);
797 /* May want to update info->enabled. Jean II */
798 }
799
800 /* Enable UART bank switching (bit 7) ; Sets the chip to normal
801 * power mode (wake up from sleep mode) (bit 1) */
802 outb(CFG_39X_SPC, cfg_base);
803 outb(0x82, cfg_base+1);
804
805 return 0;
806}
807
808/*
809 * Function nsc_ircc_setup (info)
810 *
811 * Returns non-negative on success.
812 *
813 */
814static int nsc_ircc_setup(chipio_t *info)
815{
816 int version;
817 int iobase = info->fir_base;
818
819 /* Read the Module ID */
820 switch_bank(iobase, BANK3);
821 version = inb(iobase+MID);
822
823 IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
824 __FUNCTION__, driver_name, version);
825
826 /* Should be 0x2? */
827 if (0x20 != (version & 0xf0)) {
828 IRDA_ERROR("%s, Wrong chip version %02x\n",
829 driver_name, version);
830 return -1;
831 }
832
833 /* Switch to advanced mode */
834 switch_bank(iobase, BANK2);
835 outb(ECR1_EXT_SL, iobase+ECR1);
836 switch_bank(iobase, BANK0);
837
838 /* Set FIFO threshold to TX17, RX16, reset and enable FIFO's */
839 switch_bank(iobase, BANK0);
840 outb(FCR_RXTH|FCR_TXTH|FCR_TXSR|FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
841
842 outb(0x03, iobase+LCR); /* 8 bit word length */
843 outb(MCR_SIR, iobase+MCR); /* Start at SIR-mode, also clears LSR*/
844
845 /* Set FIFO size to 32 */
846 switch_bank(iobase, BANK2);
847 outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
848
849 /* IRCR2: FEND_MD is not set */
850 switch_bank(iobase, BANK5);
851 outb(0x02, iobase+4);
852
853 /* Make sure that some defaults are OK */
854 switch_bank(iobase, BANK6);
855 outb(0x20, iobase+0); /* Set 32 bits FIR CRC */
856 outb(0x0a, iobase+1); /* Set MIR pulse width */
857 outb(0x0d, iobase+2); /* Set SIR pulse width to 1.6us */
858 outb(0x2a, iobase+4); /* Set beginning frag, and preamble length */
859
860 /* Enable receive interrupts */
861 switch_bank(iobase, BANK0);
862 outb(IER_RXHDL_IE, iobase+IER);
863
864 return 0;
865}
866
867/*
868 * Function nsc_ircc_read_dongle_id (void)
869 *
870 * Try to read the dongle identification. This procedure needs to be executed
871 * once after power-on/reset. It also needs to be used whenever you suspect
872 * that the user may have plugged/unplugged the IrDA Dongle.
873 */
874static int nsc_ircc_read_dongle_id (int iobase)
875{
876 int dongle_id;
877 __u8 bank;
878
879 bank = inb(iobase+BSR);
880
881 /* Select Bank 7 */
882 switch_bank(iobase, BANK7);
883
884 /* IRCFG4: IRSL0_DS and IRSL21_DS are cleared */
885 outb(0x00, iobase+7);
886
887 /* ID0, 1, and 2 are pulled up/down very slowly */
888 udelay(50);
889
890 /* IRCFG1: read the ID bits */
891 dongle_id = inb(iobase+4) & 0x0f;
892
893#ifdef BROKEN_DONGLE_ID
894 if (dongle_id == 0x0a)
895 dongle_id = 0x09;
896#endif
897 /* Go back to bank 0 before returning */
898 switch_bank(iobase, BANK0);
899
900 outb(bank, iobase+BSR);
901
902 return dongle_id;
903}
904
905/*
906 * Function nsc_ircc_init_dongle_interface (iobase, dongle_id)
907 *
908 * This function initializes the dongle for the transceiver that is
909 * used. This procedure needs to be executed once after
910 * power-on/reset. It also needs to be used whenever you suspect that
911 * the dongle is changed.
912 */
913static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
914{
915 int bank;
916
917 /* Save current bank */
918 bank = inb(iobase+BSR);
919
920 /* Select Bank 7 */
921 switch_bank(iobase, BANK7);
922
923 /* IRCFG4: set according to dongle_id */
924 switch (dongle_id) {
925 case 0x00: /* same as */
926 case 0x01: /* Differential serial interface */
927 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
928 __FUNCTION__, dongle_types[dongle_id]);
929 break;
930 case 0x02: /* same as */
931 case 0x03: /* Reserved */
932 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
933 __FUNCTION__, dongle_types[dongle_id]);
934 break;
935 case 0x04: /* Sharp RY5HD01 */
936 break;
937 case 0x05: /* Reserved, but this is what the Thinkpad reports */
938 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
939 __FUNCTION__, dongle_types[dongle_id]);
940 break;
941 case 0x06: /* Single-ended serial interface */
942 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
943 __FUNCTION__, dongle_types[dongle_id]);
944 break;
945 case 0x07: /* Consumer-IR only */
946 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
947 __FUNCTION__, dongle_types[dongle_id]);
948 break;
949 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
950 IRDA_DEBUG(0, "%s(), %s\n",
951 __FUNCTION__, dongle_types[dongle_id]);
952 break;
953 case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
954 outb(0x28, iobase+7); /* Set irsl[0-2] as output */
955 break;
956 case 0x0A: /* same as */
957 case 0x0B: /* Reserved */
958 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
959 __FUNCTION__, dongle_types[dongle_id]);
960 break;
961 case 0x0C: /* same as */
962 case 0x0D: /* HP HSDL-1100/HSDL-2100 */
963 /*
964 * Set irsl0 as input, irsl[1-2] as output, and separate
965 * inputs are used for SIR and MIR/FIR
966 */
967 outb(0x48, iobase+7);
968 break;
969 case 0x0E: /* Supports SIR Mode only */
970 outb(0x28, iobase+7); /* Set irsl[0-2] as output */
971 break;
972 case 0x0F: /* No dongle connected */
973 IRDA_DEBUG(0, "%s(), %s\n",
974 __FUNCTION__, dongle_types[dongle_id]);
975
976 switch_bank(iobase, BANK0);
977 outb(0x62, iobase+MCR);
978 break;
979 default:
980 IRDA_DEBUG(0, "%s(), invalid dongle_id %#x",
981 __FUNCTION__, dongle_id);
982 }
983
984 /* IRCFG1: IRSL1 and 2 are set to IrDA mode */
985 outb(0x00, iobase+4);
986
987 /* Restore bank register */
988 outb(bank, iobase+BSR);
989
990} /* set_up_dongle_interface */
991
992/*
993 * Function nsc_ircc_change_dongle_speed (iobase, speed, dongle_id)
994 *
995 * Change speed of the attached dongle
996 *
997 */
998static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
999{
1000 __u8 bank;
1001
1002 /* Save current bank */
1003 bank = inb(iobase+BSR);
1004
1005 /* Select Bank 7 */
1006 switch_bank(iobase, BANK7);
1007
1008 /* IRCFG1: set according to dongle_id */
1009 switch (dongle_id) {
1010 case 0x00: /* same as */
1011 case 0x01: /* Differential serial interface */
1012 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1013 __FUNCTION__, dongle_types[dongle_id]);
1014 break;
1015 case 0x02: /* same as */
1016 case 0x03: /* Reserved */
1017 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1018 __FUNCTION__, dongle_types[dongle_id]);
1019 break;
1020 case 0x04: /* Sharp RY5HD01 */
1021 break;
1022 case 0x05: /* Reserved */
1023 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1024 __FUNCTION__, dongle_types[dongle_id]);
1025 break;
1026 case 0x06: /* Single-ended serial interface */
1027 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1028 __FUNCTION__, dongle_types[dongle_id]);
1029 break;
1030 case 0x07: /* Consumer-IR only */
1031 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
1032 __FUNCTION__, dongle_types[dongle_id]);
1033 break;
1034 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
1035 IRDA_DEBUG(0, "%s(), %s\n",
1036 __FUNCTION__, dongle_types[dongle_id]);
1037 outb(0x00, iobase+4);
1038 if (speed > 115200)
1039 outb(0x01, iobase+4);
1040 break;
1041 case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
1042 outb(0x01, iobase+4);
1043
1044 if (speed == 4000000) {
1045 /* There was a cli() there, but we now are already
1046 * under spin_lock_irqsave() - JeanII */
1047 outb(0x81, iobase+4);
1048 outb(0x80, iobase+4);
1049 } else
1050 outb(0x00, iobase+4);
1051 break;
1052 case 0x0A: /* same as */
1053 case 0x0B: /* Reserved */
1054 IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
1055 __FUNCTION__, dongle_types[dongle_id]);
1056 break;
1057 case 0x0C: /* same as */
1058 case 0x0D: /* HP HSDL-1100/HSDL-2100 */
1059 break;
1060 case 0x0E: /* Supports SIR Mode only */
1061 break;
1062 case 0x0F: /* No dongle connected */
1063 IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
1064 __FUNCTION__, dongle_types[dongle_id]);
1065
1066 switch_bank(iobase, BANK0);
1067 outb(0x62, iobase+MCR);
1068 break;
1069 default:
1070 IRDA_DEBUG(0, "%s(), invalid data_rate\n", __FUNCTION__);
1071 }
1072 /* Restore bank register */
1073 outb(bank, iobase+BSR);
1074}
1075
1076/*
1077 * Function nsc_ircc_change_speed (self, baud)
1078 *
1079 * Change the speed of the device
1080 *
1081 * This function *must* be called with irqs off and the spin-lock held.
1082 */
1083static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
1084{
1085 struct net_device *dev = self->netdev;
1086 __u8 mcr = MCR_SIR;
1087 int iobase;
1088 __u8 bank;
1089 __u8 ier; /* Interrupt enable register */
1090
1091 IRDA_DEBUG(2, "%s(), speed=%d\n", __FUNCTION__, speed);
1092
1093 IRDA_ASSERT(self != NULL, return 0;);
1094
1095 iobase = self->io.fir_base;
1096
1097 /* Update accounting for new speed */
1098 self->io.speed = speed;
1099
1100 /* Save current bank */
1101 bank = inb(iobase+BSR);
1102
1103 /* Disable interrupts */
1104 switch_bank(iobase, BANK0);
1105 outb(0, iobase+IER);
1106
1107 /* Select Bank 2 */
1108 switch_bank(iobase, BANK2);
1109
1110 outb(0x00, iobase+BGDH);
1111 switch (speed) {
1112 case 9600: outb(0x0c, iobase+BGDL); break;
1113 case 19200: outb(0x06, iobase+BGDL); break;
1114 case 38400: outb(0x03, iobase+BGDL); break;
1115 case 57600: outb(0x02, iobase+BGDL); break;
1116 case 115200: outb(0x01, iobase+BGDL); break;
1117 case 576000:
1118 switch_bank(iobase, BANK5);
1119
1120 /* IRCR2: MDRS is set */
1121 outb(inb(iobase+4) | 0x04, iobase+4);
1122
1123 mcr = MCR_MIR;
1124 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__);
1125 break;
1126 case 1152000:
1127 mcr = MCR_MIR;
1128 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__);
1129 break;
1130 case 4000000:
1131 mcr = MCR_FIR;
1132 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__);
1133 break;
1134 default:
1135 mcr = MCR_FIR;
1136 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n",
1137 __FUNCTION__, speed);
1138 break;
1139 }
1140
1141 /* Set appropriate speed mode */
1142 switch_bank(iobase, BANK0);
1143 outb(mcr | MCR_TX_DFR, iobase+MCR);
1144
1145 /* Give some hints to the transceiver */
1146 nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
1147
1148 /* Set FIFO threshold to TX17, RX16 */
1149 switch_bank(iobase, BANK0);
1150 outb(0x00, iobase+FCR);
1151 outb(FCR_FIFO_EN, iobase+FCR);
1152 outb(FCR_RXTH| /* Set Rx FIFO threshold */
1153 FCR_TXTH| /* Set Tx FIFO threshold */
1154 FCR_TXSR| /* Reset Tx FIFO */
1155 FCR_RXSR| /* Reset Rx FIFO */
1156 FCR_FIFO_EN, /* Enable FIFOs */
1157 iobase+FCR);
1158
1159 /* Set FIFO size to 32 */
1160 switch_bank(iobase, BANK2);
1161 outb(EXCR2_RFSIZ|EXCR2_TFSIZ, iobase+EXCR2);
1162
1163 /* Enable some interrupts so we can receive frames */
1164 switch_bank(iobase, BANK0);
1165 if (speed > 115200) {
1166 /* Install FIR xmit handler */
1167 dev->hard_start_xmit = nsc_ircc_hard_xmit_fir;
1168 ier = IER_SFIF_IE;
1169 nsc_ircc_dma_receive(self);
1170 } else {
1171 /* Install SIR xmit handler */
1172 dev->hard_start_xmit = nsc_ircc_hard_xmit_sir;
1173 ier = IER_RXHDL_IE;
1174 }
1175 /* Set our current interrupt mask */
1176 outb(ier, iobase+IER);
1177
1178 /* Restore BSR */
1179 outb(bank, iobase+BSR);
1180
1181 /* Make sure interrupt handlers keep the proper interrupt mask */
1182 return(ier);
1183}
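/*
 * Editor's note -- worked example, not driver code.  The BGDL values
 * written above for the SIR rates are the familiar divisor-latch
 * values of a 16550-style UART clocked for a maximum of 115200 baud:
 *
 *	115200 /   9600 = 12 = 0x0c
 *	115200 /  19200 =  6 = 0x06
 *	115200 /  38400 =  3 = 0x03
 *	115200 /  57600 =  2 = 0x02
 *	115200 / 115200 =  1 = 0x01
 *
 * (BGDH holds the high byte of the divisor and stays 0x00 here.)
 */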
1184
1185/*
1186 * Function nsc_ircc_hard_xmit (skb, dev)
1187 *
1188 * Transmit the frame!
1189 *
1190 */
1191static int nsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
1192{
1193 struct nsc_ircc_cb *self;
1194 unsigned long flags;
1195 int iobase;
1196 __s32 speed;
1197 __u8 bank;
1198
1199 self = (struct nsc_ircc_cb *) dev->priv;
1200
1201 IRDA_ASSERT(self != NULL, return 0;);
1202
1203 iobase = self->io.fir_base;
1204
1205 netif_stop_queue(dev);
1206
1207 /* Make sure tests & speed change are atomic */
1208 spin_lock_irqsave(&self->lock, flags);
1209
1210 /* Check if we need to change the speed */
1211 speed = irda_get_next_speed(skb);
1212 if ((speed != self->io.speed) && (speed != -1)) {
1213 /* Check for empty frame. */
1214 if (!skb->len) {
1215 /* If we just sent a frame, we get called before
1216 * the last bytes get out (because of the SIR FIFO).
1217 * If this is the case, let interrupt handler change
1218 * the speed itself... Jean II */
1219 if (self->io.direction == IO_RECV) {
1220 nsc_ircc_change_speed(self, speed);
1221 /* TODO : For SIR->SIR, the next packet
1222 * may get corrupted - Jean II */
1223 netif_wake_queue(dev);
1224 } else {
1225 self->new_speed = speed;
1226 /* Queue will be restarted after speed change
1227 * to make sure packets get through the
1228 * proper xmit handler - Jean II */
1229 }
1230 dev->trans_start = jiffies;
1231 spin_unlock_irqrestore(&self->lock, flags);
1232 dev_kfree_skb(skb);
1233 return 0;
1234 } else
1235 self->new_speed = speed;
1236 }
1237
1238 /* Save current bank */
1239 bank = inb(iobase+BSR);
1240
1241 self->tx_buff.data = self->tx_buff.head;
1242
1243 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
1244 self->tx_buff.truesize);
1245
1246 self->stats.tx_bytes += self->tx_buff.len;
1247
1248 /* Add interrupt on tx low level (will fire immediately) */
1249 switch_bank(iobase, BANK0);
1250 outb(IER_TXLDL_IE, iobase+IER);
1251
1252 /* Restore bank register */
1253 outb(bank, iobase+BSR);
1254
1255 dev->trans_start = jiffies;
1256 spin_unlock_irqrestore(&self->lock, flags);
1257
1258 dev_kfree_skb(skb);
1259
1260 return 0;
1261}
1262
1263static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1264{
1265 struct nsc_ircc_cb *self;
1266 unsigned long flags;
1267 int iobase;
1268 __s32 speed;
1269 __u8 bank;
1270 int mtt, diff;
1271
1272 self = (struct nsc_ircc_cb *) dev->priv;
1273 iobase = self->io.fir_base;
1274
1275 netif_stop_queue(dev);
1276
1277 /* Make sure tests & speed change are atomic */
1278 spin_lock_irqsave(&self->lock, flags);
1279
1280 /* Check if we need to change the speed */
1281 speed = irda_get_next_speed(skb);
1282 if ((speed != self->io.speed) && (speed != -1)) {
1283 /* Check for empty frame. */
1284 if (!skb->len) {
1285 /* If we are currently transmitting, defer to
1286 * interrupt handler. - Jean II */
1287 if(self->tx_fifo.len == 0) {
1288 nsc_ircc_change_speed(self, speed);
1289 netif_wake_queue(dev);
1290 } else {
1291 self->new_speed = speed;
1292 /* Keep queue stopped :
1293 * the speed change operation may change the
1294 * xmit handler, and we want to make sure
1295 * the next packet gets through the proper
1296 * Tx path, so block the Tx queue until
1297 * the speed change has been done.
1298 * Jean II */
1299 }
1300 dev->trans_start = jiffies;
1301 spin_unlock_irqrestore(&self->lock, flags);
1302 dev_kfree_skb(skb);
1303 return 0;
1304 } else {
1305 /* Change speed after current frame */
1306 self->new_speed = speed;
1307 }
1308 }
1309
1310 /* Save current bank */
1311 bank = inb(iobase+BSR);
1312
1313 /* Register and copy this frame to DMA memory */
1314 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
1315 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
1316 self->tx_fifo.tail += skb->len;
1317
1318 self->stats.tx_bytes += skb->len;
1319
1320 memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
1321 skb->len);
1322
1323 self->tx_fifo.len++;
1324 self->tx_fifo.free++;
1325
1326 /* Start transmit only if there is currently no transmit going on */
1327 if (self->tx_fifo.len == 1) {
1328 /* Check if we must wait the min turn time or not */
1329 mtt = irda_get_mtt(skb);
1330 if (mtt) {
1331 /* Check how much time we have used already */
1332 do_gettimeofday(&self->now);
1333 diff = self->now.tv_usec - self->stamp.tv_usec;
1334 if (diff < 0)
1335 diff += 1000000;
1336
1337 /* Check if the mtt is larger than the time we have
1338 * already used by all the protocol processing
1339 */
1340 if (mtt > diff) {
1341 mtt -= diff;
1342
1343 /*
1344 * Use timer if delay larger than 125 us, and
1345 * use udelay for smaller values which should
1346 * be acceptable
1347 */
1348 if (mtt > 125) {
1349 /* Adjust for timer resolution */
1350 mtt = mtt / 125;
1351
1352 /* Setup timer */
1353 switch_bank(iobase, BANK4);
1354 outb(mtt & 0xff, iobase+TMRL);
1355 outb((mtt >> 8) & 0x0f, iobase+TMRH);
1356
1357 /* Start timer */
1358 outb(IRCR1_TMR_EN, iobase+IRCR1);
1359 self->io.direction = IO_XMIT;
1360
1361 /* Enable timer interrupt */
1362 switch_bank(iobase, BANK0);
1363 outb(IER_TMR_IE, iobase+IER);
1364
1365 /* Timer will take care of the rest */
1366 goto out;
1367 } else
1368 udelay(mtt);
1369 }
1370 }
1371 /* Enable DMA interrupt */
1372 switch_bank(iobase, BANK0);
1373 outb(IER_DMA_IE, iobase+IER);
1374
1375 /* Transmit frame */
1376 nsc_ircc_dma_xmit(self, iobase);
1377 }
1378 out:
1379 /* Not busy transmitting anymore if window is not full,
1380 * and if we don't need to change speed */
1381 if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0))
1382 netif_wake_queue(self->netdev);
1383
1384 /* Restore bank register */
1385 outb(bank, iobase+BSR);
1386
1387 dev->trans_start = jiffies;
1388 spin_unlock_irqrestore(&self->lock, flags);
1389 dev_kfree_skb(skb);
1390
1391 return 0;
1392}
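/*
 * Editor's note -- worked example, not driver code.  The min-turn-time
 * handling above works in microseconds, wrapping the tv_usec
 * difference by hand and converting the remaining wait to the chip
 * timer's 125 us resolution.  With hypothetical numbers:
 *
 *	stamp.tv_usec = 998000, now.tv_usec = 3000
 *	diff = 3000 - 998000 = -995000  ->  + 1000000 = 5000 us elapsed
 *
 *	mtt = 10000 us: 10000 > 5000, so 10000 - 5000 = 5000 us remain;
 *	5000 > 125, so the timer is used: 5000 / 125 = 40 is written to
 *	TMRL/TMRH.  A remaining wait of 125 us or less would have been
 *	burned with udelay() instead.
 */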
1393
1394/*
1395 * Function nsc_ircc_dma_xmit (self, iobase)
1396 *
1397 * Transmit data using DMA
1398 *
1399 */
1400static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase)
1401{
1402 int bsr;
1403
1404 /* Save current bank */
1405 bsr = inb(iobase+BSR);
1406
1407 /* Disable DMA */
1408 switch_bank(iobase, BANK0);
1409 outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
1410
1411 self->io.direction = IO_XMIT;
1412
1413 /* Choose transmit DMA channel */
1414 switch_bank(iobase, BANK2);
1415 outb(ECR1_DMASWP|ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
1416
1417 irda_setup_dma(self->io.dma,
1418 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
1419 self->tx_buff.head) + self->tx_buff_dma,
1420 self->tx_fifo.queue[self->tx_fifo.ptr].len,
1421 DMA_TX_MODE);
1422
1423 /* Enable DMA and SIR interaction pulse */
1424 switch_bank(iobase, BANK0);
1425 outb(inb(iobase+MCR)|MCR_TX_DFR|MCR_DMA_EN|MCR_IR_PLS, iobase+MCR);
1426
1427 /* Restore bank register */
1428 outb(bsr, iobase+BSR);
1429}
1430
1431/*
1432 * Function nsc_ircc_pio_write (iobase, buf, len, fifo_size)
1433 *
1434 * Transmit data using PIO. Returns the number of bytes that actually
1435 * got transferred
1436 *
1437 */
1438static int nsc_ircc_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
1439{
1440 int actual = 0;
1441 __u8 bank;
1442
1443 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
1444
1445 /* Save current bank */
1446 bank = inb(iobase+BSR);
1447
1448 switch_bank(iobase, BANK0);
1449 if (!(inb_p(iobase+LSR) & LSR_TXEMP)) {
1450 IRDA_DEBUG(4, "%s(), warning, FIFO not empty yet!\n",
1451 __FUNCTION__);
1452
1453 /* FIFO may still be filled to the Tx interrupt threshold */
1454 fifo_size -= 17;
1455 }
1456
1457 /* Fill FIFO with current frame */
1458 while ((fifo_size-- > 0) && (actual < len)) {
1459 /* Transmit next byte */
1460 outb(buf[actual++], iobase+TXD);
1461 }
1462
1463 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
1464 __FUNCTION__, fifo_size, actual, len);
1465
1466 /* Restore bank */
1467 outb(bank, iobase+BSR);
1468
1469 return actual;
1470}
1471
1472/*
1473 * Function nsc_ircc_dma_xmit_complete (self)
1474 *
1475 * The transfer of a frame is finished. This function will only be called
1476 * by the interrupt handler
1477 *
1478 */
1479static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
1480{
1481 int iobase;
1482 __u8 bank;
1483 int ret = TRUE;
1484
1485 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
1486
1487 iobase = self->io.fir_base;
1488
1489 /* Save current bank */
1490 bank = inb(iobase+BSR);
1491
1492 /* Disable DMA */
1493 switch_bank(iobase, BANK0);
1494 outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
1495
1496 /* Check for underrun! */
1497 if (inb(iobase+ASCR) & ASCR_TXUR) {
1498 self->stats.tx_errors++;
1499 self->stats.tx_fifo_errors++;
1500
1501 /* Clear bit, by writing 1 into it */
1502 outb(ASCR_TXUR, iobase+ASCR);
1503 } else {
1504 self->stats.tx_packets++;
1505 }
1506
1507 /* Finished with this frame, so prepare for next */
1508 self->tx_fifo.ptr++;
1509 self->tx_fifo.len--;
1510
1511 /* Any frames to be sent back-to-back? */
1512 if (self->tx_fifo.len) {
1513 nsc_ircc_dma_xmit(self, iobase);
1514
1515 /* Not finished yet! */
1516 ret = FALSE;
1517 } else {
1518 /* Reset Tx FIFO info */
1519 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1520 self->tx_fifo.tail = self->tx_buff.head;
1521 }
1522
1523 /* Make sure we have room for more frames and
1524 * that we don't need to change speed */
1525 if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) {
1526 /* Not busy transmitting anymore */
1527 /* Tell the network layer, that we can accept more frames */
1528 netif_wake_queue(self->netdev);
1529 }
1530
1531 /* Restore bank */
1532 outb(bank, iobase+BSR);
1533
1534 return ret;
1535}
1536
1537/*
1538 * Function nsc_ircc_dma_receive (self)
1539 *
1540 * Get ready for receiving a frame. The device will initiate a DMA
1541 * if it starts to receive a frame.
1542 *
1543 */
1544static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self)
1545{
1546 int iobase;
1547 __u8 bsr;
1548
1549 iobase = self->io.fir_base;
1550
1551 /* Reset Tx FIFO info */
1552 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1553 self->tx_fifo.tail = self->tx_buff.head;
1554
1555 /* Save current bank */
1556 bsr = inb(iobase+BSR);
1557
1558 /* Disable DMA */
1559 switch_bank(iobase, BANK0);
1560 outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
1561
1562 /* Choose DMA Rx, DMA Fairness, and Advanced mode */
1563 switch_bank(iobase, BANK2);
1564 outb(ECR1_DMANF|ECR1_EXT_SL, iobase+ECR1);
1565
1566 self->io.direction = IO_RECV;
1567 self->rx_buff.data = self->rx_buff.head;
1568
1569 /* Reset Rx FIFO. This will also flush the ST_FIFO */
1570 switch_bank(iobase, BANK0);
1571 outb(FCR_RXSR|FCR_FIFO_EN, iobase+FCR);
1572
1573 self->st_fifo.len = self->st_fifo.pending_bytes = 0;
1574 self->st_fifo.tail = self->st_fifo.head = 0;
1575
1576 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
1577 DMA_RX_MODE);
1578
1579 /* Enable DMA */
1580 switch_bank(iobase, BANK0);
1581 outb(inb(iobase+MCR)|MCR_DMA_EN, iobase+MCR);
1582
1583 /* Restore bank register */
1584 outb(bsr, iobase+BSR);
1585
1586 return 0;
1587}
1588
1589/*
1590 * Function nsc_ircc_dma_receive_complete (self)
1591 *
1592 * Finished with receiving frames
1593 *
1594 *
1595 */
1596static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1597{
1598 struct st_fifo *st_fifo;
1599 struct sk_buff *skb;
1600 __u8 status;
1601 __u8 bank;
1602 int len;
1603
1604 st_fifo = &self->st_fifo;
1605
1606 /* Save current bank */
1607 bank = inb(iobase+BSR);
1608
1609 /* Read all entries in status FIFO */
1610 switch_bank(iobase, BANK5);
1611 while ((status = inb(iobase+FRM_ST)) & FRM_ST_VLD) {
1612 /* We must empty the status FIFO no matter what */
1613 len = inb(iobase+RFLFL) | ((inb(iobase+RFLFH) & 0x1f) << 8);
1614
1615 if (st_fifo->tail >= MAX_RX_WINDOW) {
1616 IRDA_DEBUG(0, "%s(), window is full!\n", __FUNCTION__);
1617 continue;
1618 }
1619
1620 st_fifo->entries[st_fifo->tail].status = status;
1621 st_fifo->entries[st_fifo->tail].len = len;
1622 st_fifo->pending_bytes += len;
1623 st_fifo->tail++;
1624 st_fifo->len++;
1625 }
1626 /* Try to process all entries in status FIFO */
1627 while (st_fifo->len > 0) {
1628 /* Get first entry */
1629 status = st_fifo->entries[st_fifo->head].status;
1630 len = st_fifo->entries[st_fifo->head].len;
1631 st_fifo->pending_bytes -= len;
1632 st_fifo->head++;
1633 st_fifo->len--;
1634
1635 /* Check for errors */
1636 if (status & FRM_ST_ERR_MSK) {
1637 if (status & FRM_ST_LOST_FR) {
1638 /* Add number of lost frames to stats */
1639 self->stats.rx_errors += len;
1640 } else {
1641 /* Skip frame */
1642 self->stats.rx_errors++;
1643
1644 self->rx_buff.data += len;
1645
1646 if (status & FRM_ST_MAX_LEN)
1647 self->stats.rx_length_errors++;
1648
1649 if (status & FRM_ST_PHY_ERR)
1650 self->stats.rx_frame_errors++;
1651
1652 if (status & FRM_ST_BAD_CRC)
1653 self->stats.rx_crc_errors++;
1654 }
1655 /* The errors below can be reported in both cases */
1656 if (status & FRM_ST_OVR1)
1657 self->stats.rx_fifo_errors++;
1658
1659 if (status & FRM_ST_OVR2)
1660 self->stats.rx_fifo_errors++;
1661 } else {
1662 /*
1663 * First we must make sure that the frame we
1664 * want to deliver is all in main memory. If we
1665 * cannot tell, then we check if the Rx FIFO is
1666 * empty. If not then we will have to take a nap
1667 * and try again later.
1668 */
1669 if (st_fifo->pending_bytes < self->io.fifo_size) {
1670 switch_bank(iobase, BANK0);
1671 if (inb(iobase+LSR) & LSR_RXDA) {
1672 /* Put this entry back in fifo */
1673 st_fifo->head--;
1674 st_fifo->len++;
1675 st_fifo->pending_bytes += len;
1676 st_fifo->entries[st_fifo->head].status = status;
1677 st_fifo->entries[st_fifo->head].len = len;
1678 /*
1679 * DMA not finished yet, so try again
1680 * later, set timer value, resolution
1681 * 125 us
1682 */
1683 switch_bank(iobase, BANK4);
1684 outb(0x02, iobase+TMRL); /* x 125 us */
1685 outb(0x00, iobase+TMRH);
1686
1687 /* Start timer */
1688 outb(IRCR1_TMR_EN, iobase+IRCR1);
1689
1690 /* Restore bank register */
1691 outb(bank, iobase+BSR);
1692
1693 return FALSE; /* I'll be back! */
1694 }
1695 }
1696
1697 /*
1698 * Remember the time we received this frame, so we can
1699 * reduce the min turn time a bit since we will know
1700 * how much time we have used for protocol processing
1701 */
1702 do_gettimeofday(&self->stamp);
1703
1704 skb = dev_alloc_skb(len+1);
1705 if (skb == NULL) {
1706 IRDA_WARNING("%s(), memory squeeze, "
1707 "dropping frame.\n",
1708 __FUNCTION__);
1709 self->stats.rx_dropped++;
1710
1711 /* Restore bank register */
1712 outb(bank, iobase+BSR);
1713
1714 return FALSE;
1715 }
1716
1717 /* Make sure IP header gets aligned */
1718 skb_reserve(skb, 1);
1719
1720 /* Copy frame without CRC */
1721 if (self->io.speed < 4000000) {
1722 skb_put(skb, len-2);
1723 memcpy(skb->data, self->rx_buff.data, len-2);
1724 } else {
1725 skb_put(skb, len-4);
1726 memcpy(skb->data, self->rx_buff.data, len-4);
1727 }
1728
1729 /* Move to next frame */
1730 self->rx_buff.data += len;
1731 self->stats.rx_bytes += len;
1732 self->stats.rx_packets++;
1733
1734 skb->dev = self->netdev;
1735 skb->mac.raw = skb->data;
1736 skb->protocol = htons(ETH_P_IRDA);
1737 netif_rx(skb);
1738 self->netdev->last_rx = jiffies;
1739 }
1740 }
1741 /* Restore bank register */
1742 outb(bank, iobase+BSR);
1743
1744 return TRUE;
1745}
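/*
 * Editor's note: the len-2 / len-4 distinction in the copy above comes
 * from the IrDA frame check sequence -- SIR and MIR frames (below
 * 4 Mb/s) carry a 16-bit FCS, while 4 Mb/s FIR frames carry a 32-bit
 * FCS, so 2 or 4 trailing bytes are stripped before the skb is passed
 * up the stack.
 */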
1746
1747/*
1748 * Function nsc_ircc_pio_receive (self)
1749 *
1750 * Receive all data in receiver FIFO
1751 *
1752 */
1753static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self)
1754{
1755 __u8 byte;
1756 int iobase;
1757
1758 iobase = self->io.fir_base;
1759
1760 /* Receive all characters in Rx FIFO */
1761 do {
1762 byte = inb(iobase+RXD);
1763 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
1764 byte);
1765 } while (inb(iobase+LSR) & LSR_RXDA); /* Data available */
1766}
1767
1768/*
1769 * Function nsc_ircc_sir_interrupt (self, eir)
1770 *
1771 * Handle SIR interrupt
1772 *
1773 */
1774static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
1775{
1776 int actual;
1777
1778 /* Check if transmit FIFO is low on data */
1779 if (eir & EIR_TXLDL_EV) {
1780 /* Write data left in transmit buffer */
1781 actual = nsc_ircc_pio_write(self->io.fir_base,
1782 self->tx_buff.data,
1783 self->tx_buff.len,
1784 self->io.fifo_size);
1785 self->tx_buff.data += actual;
1786 self->tx_buff.len -= actual;
1787
1788 self->io.direction = IO_XMIT;
1789
1790 /* Check if finished */
1791 if (self->tx_buff.len > 0)
1792 self->ier = IER_TXLDL_IE;
1793 else {
1794
1795 self->stats.tx_packets++;
1796 netif_wake_queue(self->netdev);
1797 self->ier = IER_TXEMP_IE;
1798 }
1799
1800 }
1801 /* Check if transmission has completed */
1802 if (eir & EIR_TXEMP_EV) {
1803 /* Turn around and get ready to receive some data */
1804 self->io.direction = IO_RECV;
1805 self->ier = IER_RXHDL_IE;
1806 /* Check if we need to change the speed.
1807 * This needs to be after self->io.direction to avoid a race with
1808 * nsc_ircc_hard_xmit_sir() - Jean II */
1809 if (self->new_speed) {
1810 IRDA_DEBUG(2, "%s(), Changing speed!\n", __FUNCTION__);
1811 self->ier = nsc_ircc_change_speed(self,
1812 self->new_speed);
1813 self->new_speed = 0;
1814 netif_wake_queue(self->netdev);
1815
1816 /* Check if we are going to FIR */
1817 if (self->io.speed > 115200) {
1818 /* No need to do any more SIR stuff */
1819 return;
1820 }
1821 }
1822 }
1823
1824 /* Rx FIFO threshold or timeout */
1825 if (eir & EIR_RXHDL_EV) {
1826 nsc_ircc_pio_receive(self);
1827
1828 /* Keep receiving */
1829 self->ier = IER_RXHDL_IE;
1830 }
1831}
1832
1833/*
1834 * Function nsc_ircc_fir_interrupt (self, eir)
1835 *
1836 * Handle MIR/FIR interrupt
1837 *
1838 */
1839static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase,
1840 int eir)
1841{
1842 __u8 bank;
1843
1844 bank = inb(iobase+BSR);
1845
1846 /* Status FIFO event*/
1847 if (eir & EIR_SFIF_EV) {
1848 /* Check if DMA has finished */
1849 if (nsc_ircc_dma_receive_complete(self, iobase)) {
1850 /* Wait for next status FIFO interrupt */
1851 self->ier = IER_SFIF_IE;
1852 } else {
1853 self->ier = IER_SFIF_IE | IER_TMR_IE;
1854 }
1855 } else if (eir & EIR_TMR_EV) { /* Timer finished */
1856 /* Disable timer */
1857 switch_bank(iobase, BANK4);
1858 outb(0, iobase+IRCR1);
1859
1860 /* Clear timer event */
1861 switch_bank(iobase, BANK0);
1862 outb(ASCR_CTE, iobase+ASCR);
1863
1864 /* Check if this is a Tx timer interrupt */
1865 if (self->io.direction == IO_XMIT) {
1866 nsc_ircc_dma_xmit(self, iobase);
1867
1868 /* Interrupt on DMA */
1869 self->ier = IER_DMA_IE;
1870 } else {
1871 /* Check (again) if DMA has finished */
1872 if (nsc_ircc_dma_receive_complete(self, iobase)) {
1873 self->ier = IER_SFIF_IE;
1874 } else {
1875 self->ier = IER_SFIF_IE | IER_TMR_IE;
1876 }
1877 }
1878 } else if (eir & EIR_DMA_EV) {
1879 /* Finished with all transmissions? */
1880 if (nsc_ircc_dma_xmit_complete(self)) {
1881 if(self->new_speed != 0) {
1882 /* As we stop the Tx queue, the speed change
1883 * needs to be done when the Tx fifo is
1884 * empty. Ask for a Tx done interrupt */
1885 self->ier = IER_TXEMP_IE;
1886 } else {
1887 /* Check if there are more frames to be
1888 * transmitted */
1889 if (irda_device_txqueue_empty(self->netdev)) {
1890 /* Prepare for receive */
1891 nsc_ircc_dma_receive(self);
1892 self->ier = IER_SFIF_IE;
1893 } else
1894 IRDA_WARNING("%s(), potential "
1895 "Tx queue lockup !\n",
1896 __FUNCTION__);
1897 }
1898 } else {
1899 /* Not finished yet, so interrupt on DMA again */
1900 self->ier = IER_DMA_IE;
1901 }
1902 } else if (eir & EIR_TXEMP_EV) {
1903 /* The Tx FIFO has totally drained out, so now we can change
1904 * the speed... - Jean II */
1905 self->ier = nsc_ircc_change_speed(self, self->new_speed);
1906 self->new_speed = 0;
1907 netif_wake_queue(self->netdev);
1908 /* Note : nsc_ircc_change_speed() restarted Rx fifo */
1909 }
1910
1911 outb(bank, iobase+BSR);
1912}
1913
1914/*
1915 * Function nsc_ircc_interrupt (irq, dev_id, regs)
1916 *
1917 * An interrupt from the chip has arrived. Time to do some work
1918 *
1919 */
1920static irqreturn_t nsc_ircc_interrupt(int irq, void *dev_id,
1921 struct pt_regs *regs)
1922{
1923 struct net_device *dev = (struct net_device *) dev_id;
1924 struct nsc_ircc_cb *self;
1925 __u8 bsr, eir;
1926 int iobase;
1927
1928 if (!dev) {
1929 IRDA_WARNING("%s: irq %d for unknown device.\n",
1930 driver_name, irq);
1931 return IRQ_NONE;
1932 }
1933 self = (struct nsc_ircc_cb *) dev->priv;
1934
1935 spin_lock(&self->lock);
1936
1937 iobase = self->io.fir_base;
1938
1939 bsr = inb(iobase+BSR); /* Save current bank */
1940
1941 switch_bank(iobase, BANK0);
1942 self->ier = inb(iobase+IER);
1943 eir = inb(iobase+EIR) & self->ier; /* Keep only the interesting ones */
1944
1945 outb(0, iobase+IER); /* Disable interrupts */
1946
1947 if (eir) {
1948 /* Dispatch interrupt handler for the current speed */
1949 if (self->io.speed > 115200)
1950 nsc_ircc_fir_interrupt(self, iobase, eir);
1951 else
1952 nsc_ircc_sir_interrupt(self, eir);
1953 }
1954
1955 outb(self->ier, iobase+IER); /* Restore interrupts */
1956 outb(bsr, iobase+BSR); /* Restore bank register */
1957
1958 spin_unlock(&self->lock);
1959 return IRQ_RETVAL(eir);
1960}
1961
1962/*
1963 * Function nsc_ircc_is_receiving (self)
1964 *
1965 * Return TRUE if we are currently receiving a frame
1966 *
1967 */
1968static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self)
1969{
1970 unsigned long flags;
1971 int status = FALSE;
1972 int iobase;
1973 __u8 bank;
1974
1975 IRDA_ASSERT(self != NULL, return FALSE;);
1976
1977 spin_lock_irqsave(&self->lock, flags);
1978
1979 if (self->io.speed > 115200) {
1980 iobase = self->io.fir_base;
1981
1982 /* Check if rx FIFO is not empty */
1983 bank = inb(iobase+BSR);
1984 switch_bank(iobase, BANK2);
1985 if ((inb(iobase+RXFLV) & 0x3f) != 0) {
1986 /* We are receiving something */
1987 status = TRUE;
1988 }
1989 outb(bank, iobase+BSR);
1990 } else
1991 status = (self->rx_buff.state != OUTSIDE_FRAME);
1992
1993 spin_unlock_irqrestore(&self->lock, flags);
1994
1995 return status;
1996}
1997
1998/*
1999 * Function nsc_ircc_net_open (dev)
2000 *
2001 * Start the device
2002 *
2003 */
2004static int nsc_ircc_net_open(struct net_device *dev)
2005{
2006 struct nsc_ircc_cb *self;
2007 int iobase;
2008 char hwname[32];
2009 __u8 bank;
2010
2011 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
2012
2013 IRDA_ASSERT(dev != NULL, return -1;);
2014 self = (struct nsc_ircc_cb *) dev->priv;
2015
2016 IRDA_ASSERT(self != NULL, return 0;);
2017
2018 iobase = self->io.fir_base;
2019
2020 if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) {
2021 IRDA_WARNING("%s, unable to allocate irq=%d\n",
2022 driver_name, self->io.irq);
2023 return -EAGAIN;
2024 }
2025 /*
2026 * Always allocate the DMA channel after the IRQ, and clean up on
2027 * failure.
2028 */
2029 if (request_dma(self->io.dma, dev->name)) {
2030 IRDA_WARNING("%s, unable to allocate dma=%d\n",
2031 driver_name, self->io.dma);
2032 free_irq(self->io.irq, dev);
2033 return -EAGAIN;
2034 }
2035
2036 /* Save current bank */
2037 bank = inb(iobase+BSR);
2038
2039 /* turn on interrupts */
2040 switch_bank(iobase, BANK0);
2041 outb(IER_LS_IE | IER_RXHDL_IE, iobase+IER);
2042
2043 /* Restore bank register */
2044 outb(bank, iobase+BSR);
2045
2046 /* Ready to play! */
2047 netif_start_queue(dev);
2048
2049 /* Give self a hardware name */
2050 sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base);
2051
2052 /*
2053 * Open new IrLAP layer instance, now that everything should be
2054 * initialized properly
2055 */
2056 self->irlap = irlap_open(dev, &self->qos, hwname);
2057
2058 return 0;
2059}
2060
2061/*
2062 * Function nsc_ircc_net_close (dev)
2063 *
2064 * Stop the device
2065 *
2066 */
2067static int nsc_ircc_net_close(struct net_device *dev)
2068{
2069 struct nsc_ircc_cb *self;
2070 int iobase;
2071 __u8 bank;
2072
2073 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
2074
2075 IRDA_ASSERT(dev != NULL, return -1;);
2076
2077 self = (struct nsc_ircc_cb *) dev->priv;
2078 IRDA_ASSERT(self != NULL, return 0;);
2079
2080 /* Stop device */
2081 netif_stop_queue(dev);
2082
2083 /* Stop and remove instance of IrLAP */
2084 if (self->irlap)
2085 irlap_close(self->irlap);
2086 self->irlap = NULL;
2087
2088 iobase = self->io.fir_base;
2089
2090 disable_dma(self->io.dma);
2091
2092 /* Save current bank */
2093 bank = inb(iobase+BSR);
2094
2095 /* Disable interrupts */
2096 switch_bank(iobase, BANK0);
2097 outb(0, iobase+IER);
2098
2099 free_irq(self->io.irq, dev);
2100 free_dma(self->io.dma);
2101
2102 /* Restore bank register */
2103 outb(bank, iobase+BSR);
2104
2105 return 0;
2106}
2107
2108/*
2109 * Function nsc_ircc_net_ioctl (dev, rq, cmd)
2110 *
2111 * Process IOCTL commands for this device
2112 *
2113 */
2114static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2115{
2116 struct if_irda_req *irq = (struct if_irda_req *) rq;
2117 struct nsc_ircc_cb *self;
2118 unsigned long flags;
2119 int ret = 0;
2120
2121 IRDA_ASSERT(dev != NULL, return -1;);
2122
2123 self = dev->priv;
2124
2125 IRDA_ASSERT(self != NULL, return -1;);
2126
2127 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
2128
2129 switch (cmd) {
2130 case SIOCSBANDWIDTH: /* Set bandwidth */
2131 if (!capable(CAP_NET_ADMIN)) {
2132 ret = -EPERM;
2133 break;
2134 }
2135 spin_lock_irqsave(&self->lock, flags);
2136 nsc_ircc_change_speed(self, irq->ifr_baudrate);
2137 spin_unlock_irqrestore(&self->lock, flags);
2138 break;
2139 case SIOCSMEDIABUSY: /* Set media busy */
2140 if (!capable(CAP_NET_ADMIN)) {
2141 ret = -EPERM;
2142 break;
2143 }
2144 irda_device_set_media_busy(self->netdev, TRUE);
2145 break;
2146 case SIOCGRECEIVING: /* Check if we are receiving right now */
2147 /* This is already protected */
2148 irq->ifr_receiving = nsc_ircc_is_receiving(self);
2149 break;
2150 default:
2151 ret = -EOPNOTSUPP;
2152 }
2153 return ret;
2154}
2155
2156static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
2157{
2158 struct nsc_ircc_cb *self = (struct nsc_ircc_cb *) dev->priv;
2159
2160 return &self->stats;
2161}
2162
2163static void nsc_ircc_suspend(struct nsc_ircc_cb *self)
2164{
2165 IRDA_MESSAGE("%s, Suspending\n", driver_name);
2166
2167 if (self->io.suspended)
2168 return;
2169
2170 nsc_ircc_net_close(self->netdev);
2171
2172 self->io.suspended = 1;
2173}
2174
2175static void nsc_ircc_wakeup(struct nsc_ircc_cb *self)
2176{
2177 if (!self->io.suspended)
2178 return;
2179
2180 nsc_ircc_setup(&self->io);
2181 nsc_ircc_net_open(self->netdev);
2182
2183 IRDA_MESSAGE("%s, Waking up\n", driver_name);
2184
2185 self->io.suspended = 0;
2186}
2187
2188static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
2189{
2190 struct nsc_ircc_cb *self = (struct nsc_ircc_cb*) dev->data;
2191 if (self) {
2192 switch (rqst) {
2193 case PM_SUSPEND:
2194 nsc_ircc_suspend(self);
2195 break;
2196 case PM_RESUME:
2197 nsc_ircc_wakeup(self);
2198 break;
2199 }
2200 }
2201 return 0;
2202}
2203
2204MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
2205MODULE_DESCRIPTION("NSC IrDA Device Driver");
2206MODULE_LICENSE("GPL");
2207
2208
2209module_param(qos_mtt_bits, int, 0);
2210MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
2211module_param_array(io, int, NULL, 0);
2212MODULE_PARM_DESC(io, "Base I/O addresses");
2213module_param_array(irq, int, NULL, 0);
2214MODULE_PARM_DESC(irq, "IRQ lines");
2215module_param_array(dma, int, NULL, 0);
2216MODULE_PARM_DESC(dma, "DMA channels");
2217module_param(dongle_id, int, 0);
2218MODULE_PARM_DESC(dongle_id, "Type-id of used dongle");
2219
2220module_init(nsc_ircc_init);
2221module_exit(nsc_ircc_cleanup);
2222
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
new file mode 100644
index 000000000000..6edf7e514624
--- /dev/null
+++ b/drivers/net/irda/nsc-ircc.h
@@ -0,0 +1,280 @@
1/*********************************************************************
2 *
3 * Filename: nsc-ircc.h
4 * Version:
5 * Description:
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Fri Nov 13 14:37:40 1998
9 * Modified at: Sun Jan 23 17:47:00 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com>
14 * Copyright (c) 1998 Actisys Corp., www.actisys.com
15 * All Rights Reserved
16 *
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
21 *
22 * Neither Dag Brattli nor University of Tromsø admit liability nor
23 * provide warranty for any of this software. This material is
24 * provided "AS-IS" and at no charge.
25 *
26 ********************************************************************/
27
28#ifndef NSC_IRCC_H
29#define NSC_IRCC_H
30
31#include <linux/time.h>
32
33#include <linux/spinlock.h>
34#include <linux/pm.h>
35#include <linux/types.h>
36#include <asm/io.h>
37
38/* DMA modes needed */
39#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
40#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
41
42/* Config registers for the '108 */
43#define CFG_108_BAIC 0x00
44#define CFG_108_CSRT 0x01
45#define CFG_108_MCTL 0x02
46
47/* Config registers for the '338 */
48#define CFG_338_FER 0x00
49#define CFG_338_FAR 0x01
50#define CFG_338_PTR 0x02
51#define CFG_338_PNP0 0x1b
52#define CFG_338_PNP1 0x1c
53#define CFG_338_PNP3 0x4f
54
55/* Config registers for the '39x (in the logical device bank) */
56#define CFG_39X_LDN 0x07 /* Logical device number (Super I/O bank) */
57#define CFG_39X_SIOCF1 0x21 /* SuperI/O Config */
58#define CFG_39X_ACT 0x30 /* Device activation */
59#define CFG_39X_BASEH 0x60 /* Device base address (high bits) */
60#define CFG_39X_BASEL 0x61 /* Device base address (low bits) */
61#define CFG_39X_IRQNUM 0x70 /* Interrupt number & wake up enable */
62#define CFG_39X_IRQSEL 0x71 /* Interrupt select (edge/level + polarity) */
63#define CFG_39X_DMA0 0x74 /* DMA 0 configuration */
64#define CFG_39X_DMA1 0x75 /* DMA 1 configuration */
65#define CFG_39X_SPC 0xF0 /* Serial port configuration register */
66
67/* Flags for configuration register CRF0 */
68#define APEDCRC 0x02
69#define ENBNKSEL 0x01
70
71/* Set 0 */
72#define TXD 0x00 /* Transmit data port */
73#define RXD 0x00 /* Receive data port */
74
75/* Register 1 */
76#define IER 0x01 /* Interrupt Enable Register*/
77#define IER_RXHDL_IE 0x01 /* Receiver high data level interrupt */
78#define IER_TXLDL_IE 0x02 /* Transmitter low data level interrupt */
79#define IER_LS_IE 0x04 /* Link Status Interrupt */
80#define IER_ETXURI 0x04 /* Tx underrun */
81#define IER_DMA_IE 0x10 /* DMA finished interrupt */
82#define IER_TXEMP_IE 0x20
83#define IER_SFIF_IE 0x40 /* Frame status FIFO intr */
84#define IER_TMR_IE 0x80 /* Timer event */
85
86#define FCR 0x02 /* (write only) */
87#define FCR_FIFO_EN 0x01 /* Enable FIFO's */
88#define FCR_RXSR 0x02 /* Rx FIFO soft reset */
89#define FCR_TXSR 0x04 /* Tx FIFO soft reset */
90#define FCR_RXTH 0x40 /* Rx FIFO threshold (set to 16) */
91#define FCR_TXTH 0x20 /* Tx FIFO threshold (set to 17) */
92
93#define EIR 0x02 /* (read only) */
94#define EIR_RXHDL_EV 0x01
95#define EIR_TXLDL_EV 0x02
96#define EIR_LS_EV 0x04
97#define EIR_DMA_EV 0x10
98#define EIR_TXEMP_EV 0x20
99#define EIR_SFIF_EV 0x40
100#define EIR_TMR_EV 0x80
101
102#define LCR 0x03 /* Link control register */
103#define LCR_WLS_8 0x03 /* 8 bits */
104
105#define BSR 0x03 /* Bank select register */
106#define BSR_BKSE 0x80
107#define BANK0 LCR_WLS_8 /* Must make sure that we set 8N1 */
108#define BANK1 0x80
109#define BANK2 0xe0
110#define BANK3 0xe4
111#define BANK4 0xe8
112#define BANK5 0xec
113#define BANK6 0xf0
114#define BANK7 0xf4
115
116#define MCR 0x04 /* Mode Control Register */
117#define MCR_MODE_MASK ~(0xd0)
118#define MCR_UART 0x00
119#define MCR_RESERVED 0x20
120#define MCR_SHARP_IR 0x40
121#define MCR_SIR 0x60
122#define MCR_MIR 0x80
123#define MCR_FIR 0xa0
124#define MCR_CEIR 0xb0
125#define MCR_IR_PLS 0x10
126#define MCR_DMA_EN 0x04
127#define MCR_EN_IRQ 0x08
128#define MCR_TX_DFR 0x08
129
130#define LSR 0x05 /* Link status register */
131#define LSR_RXDA 0x01 /* Receiver data available */
132#define LSR_TXRDY 0x20 /* Transmitter ready */
133#define LSR_TXEMP 0x40 /* Transmitter empty */
134
135#define ASCR 0x07 /* Auxiliary Status and Control Register */
136#define ASCR_RXF_TOUT 0x01 /* Rx FIFO timeout */
137#define ASCR_FEND_INF 0x02 /* Frame end bytes in rx FIFO */
138#define ASCR_S_EOT 0x04 /* Set end of transmission */
139#define ASCT_RXBSY 0x20 /* Rx busy */
140#define ASCR_TXUR 0x40 /* Transmitter underrun */
141#define ASCR_CTE 0x80 /* Clear timer event */
142
143/* Bank 2 */
144#define BGDL 0x00 /* Baud Generator Divisor Port (Low Byte) */
145#define BGDH 0x01 /* Baud Generator Divisor Port (High Byte) */
146
147#define ECR1 0x02 /* Extended Control Register 1 */
148#define ECR1_EXT_SL 0x01 /* Extended Mode Select */
149#define ECR1_DMANF 0x02 /* DMA Fairness */
150#define ECR1_DMATH 0x04 /* DMA Threshold */
151#define ECR1_DMASWP 0x08 /* DMA Swap */
152
153#define EXCR2 0x04
154#define EXCR2_TFSIZ 0x01 /* Tx FIFO size = 32 */
155#define EXCR2_RFSIZ 0x04 /* Rx FIFO size = 32 */
156
157#define TXFLV 0x06 /* Tx FIFO level */
158#define RXFLV 0x07 /* Rx FIFO level */
159
160/* Bank 3 */
161#define MID 0x00
162
163/* Bank 4 */
164#define TMRL 0x00 /* Timer low byte */
165#define TMRH 0x01 /* Timer high byte */
166#define IRCR1 0x02 /* Infrared control register 1 */
167#define IRCR1_TMR_EN 0x01 /* Timer enable */
168
169#define TFRLL 0x04
170#define TFRLH 0x05
171#define RFRLL 0x06
172#define RFRLH 0x07
173
174/* Bank 5 */
175#define IRCR2 0x04 /* Infrared control register 2 */
176#define IRCR2_MDRS 0x04 /* MIR data rate select */
177#define IRCR2_FEND_MD 0x20 /* */
178
179#define FRM_ST 0x05 /* Frame status FIFO */
180#define FRM_ST_VLD 0x80 /* Frame status FIFO data valid */
181#define FRM_ST_ERR_MSK 0x5f
182#define FRM_ST_LOST_FR 0x40 /* Frame lost */
183#define FRM_ST_MAX_LEN 0x10 /* Max frame len exceeded */
184#define FRM_ST_PHY_ERR 0x08 /* Physical layer error */
185#define FRM_ST_BAD_CRC 0x04
186#define FRM_ST_OVR1 0x02 /* Rx FIFO overrun */
187#define FRM_ST_OVR2 0x01 /* Frame status FIFO overrun */
188
189#define RFLFL 0x06
190#define RFLFH 0x07
191
192/* Bank 6 */
193#define IR_CFG2 0x00
194#define IR_CFG2_DIS_CRC 0x02
195
196/* Bank 7 */
197#define IRM_CR 0x07 /* Infrared module control register */
198#define IRM_CR_IRX_MSL 0x40
199#define IRM_CR_AF_MNT 0x80 /* Automatic format */
200
201/* NSC chip information */
202struct nsc_chip {
203 char *name; /* Name of chipset */
204 int cfg[3]; /* Config registers */
205 u_int8_t cid_index; /* Chip identification index reg */
206 u_int8_t cid_value; /* Chip identification expected value */
207 u_int8_t cid_mask; /* Chip identification revision mask */
208
209 /* Functions for probing and initializing the specific chip */
210 int (*probe)(struct nsc_chip *chip, chipio_t *info);
211 int (*init)(struct nsc_chip *chip, chipio_t *info);
212};
213typedef struct nsc_chip nsc_chip_t;
214
215/* For storing entries in the status FIFO */
216struct st_fifo_entry {
217 int status;
218 int len;
219};
220
221#define MAX_TX_WINDOW 7
222#define MAX_RX_WINDOW 7
223
224struct st_fifo {
225 struct st_fifo_entry entries[MAX_RX_WINDOW];
226 int pending_bytes;
227 int head;
228 int tail;
229 int len;
230};
231
232struct frame_cb {
233 void *start; /* Start of frame in DMA mem */
234 int len; /* Length of frame in DMA mem */
235};
236
237struct tx_fifo {
238 struct frame_cb queue[MAX_TX_WINDOW]; /* Info about frames in queue */
239 int ptr; /* Currently being sent */
240 int len; /* Length of queue */
241 int free; /* Next free slot */
242 void *tail; /* Next free start in DMA mem */
243};
244
245/* Private data for each instance */
246struct nsc_ircc_cb {
247 struct st_fifo st_fifo; /* Info about received frames */
248 struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
249
250 struct net_device *netdev; /* Yes! we are some kind of netdevice */
251 struct net_device_stats stats;
252
253 struct irlap_cb *irlap; /* The link layer we are bound to */
254 struct qos_info qos; /* QoS capabilities for this device */
255
256 chipio_t io; /* IrDA controller information */
257 iobuff_t tx_buff; /* Transmit buffer */
258 iobuff_t rx_buff; /* Receive buffer */
259 dma_addr_t tx_buff_dma;
260 dma_addr_t rx_buff_dma;
261
262 __u8 ier; /* Interrupt enable register */
263
264 struct timeval stamp;
265 struct timeval now;
266
267 spinlock_t lock; /* For serializing operations */
268
269 __u32 new_speed;
270 int index; /* Instance index */
271
272 struct pm_dev *dev;
273};
274
275static inline void switch_bank(int iobase, int bank)
276{
277 outb(bank, iobase+BSR);
278}
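/*
 * Editor's note: a minimal usage sketch of switch_bank(), mirroring the
 * save/switch/restore pattern used throughout nsc-ircc.c ("fifo_level"
 * is an illustrative local, not part of the driver):
 *
 *	__u8 bank = inb(iobase + BSR);			save the current bank
 *	switch_bank(iobase, BANK2);
 *	fifo_level = inb(iobase + RXFLV) & 0x3f;
 *	outb(bank, iobase + BSR);			restore the saved bank
 */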
279
280#endif /* NSC_IRCC_H */
diff --git a/drivers/net/irda/old_belkin-sir.c b/drivers/net/irda/old_belkin-sir.c
new file mode 100644
index 000000000000..8c22c7374a23
--- /dev/null
+++ b/drivers/net/irda/old_belkin-sir.c
@@ -0,0 +1,156 @@
1/*********************************************************************
2 *
3 * Filename: old_belkin-sir.c
4 * Version: 1.1
5 * Description: Driver for the Belkin (old) SmartBeam dongle
6 * Status: Experimental...
7 * Author: Jean Tourrilhes <jt@hpl.hp.com>
8 * Created at: 22/11/99
9 * Modified at: Fri Dec 17 09:13:32 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999 Jean Tourrilhes, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
27 * MA 02111-1307 USA
28 *
29 ********************************************************************/
30
31#include <linux/module.h>
32#include <linux/delay.h>
33#include <linux/init.h>
34
35#include <net/irda/irda.h>
36// #include <net/irda/irda_device.h>
37
38#include "sir-dev.h"
39
40/*
41 * Belkin is selling a dongle called the SmartBeam.
42 * In fact, there are two hardware versions of this dongle, of course with
43 * the same name and looking exactly the same (grrr...).
44 * I guess that I've got the old one, because inside I don't have
45 * a jumper for IrDA/ASK...
46 *
47 * As far as I can tell from the info on their web site, the old dongle
48 * supports only 9600 b/s, which makes our life much simpler as far as
49 * the driver is concerned, but you might not like it very much ;-)
50 * The new SmartBeam does 115 kb/s, and I've not tested it...
51 *
52 * Belkin claims that the correct driver for the old dongle (in Windows)
53 * is the generic Parallax 9500a driver, but the Linux LiteLink driver
54 * fails for me (probably because Linux-IrDA doesn't do rate fallback),
55 * so I created this really dumb driver...
56 *
57 * In fact, this driver doesn't do much. The only thing it does is to
58 * prevent Linux-IrDA from using any other speed than 9600 b/s ;-) This
59 * driver is called "old_belkin" so that when the new SmartBeam is supported
60 * its driver can be called "belkin" instead of "new_belkin".
61 *
62 * Note : this driver was written without any info/help from Belkin,
63 * so a lot of info here might be totally wrong. Blame me ;-)
64 */
65
66static int old_belkin_open(struct sir_dev *dev);
67static int old_belkin_close(struct sir_dev *dev);
68static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed);
69static int old_belkin_reset(struct sir_dev *dev);
70
71static struct dongle_driver old_belkin = {
72 .owner = THIS_MODULE,
73 .driver_name = "Old Belkin SmartBeam",
74 .type = IRDA_OLD_BELKIN_DONGLE,
75 .open = old_belkin_open,
76 .close = old_belkin_close,
77 .reset = old_belkin_reset,
78 .set_speed = old_belkin_change_speed,
79};
80
81static int __init old_belkin_sir_init(void)
82{
83 return irda_register_dongle(&old_belkin);
84}
85
86static void __exit old_belkin_sir_cleanup(void)
87{
88 irda_unregister_dongle(&old_belkin);
89}
90
91static int old_belkin_open(struct sir_dev *dev)
92{
93 struct qos_info *qos = &dev->qos;
94
95 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
96
97 /* Power on dongle */
98 sirdev_set_dtr_rts(dev, TRUE, TRUE);
99
100 /* Not too fast, please... */
101 qos->baud_rate.bits &= IR_9600;
102 /* Needs at least 10 ms (totally wild guess, can do probably better) */
103 qos->min_turn_time.bits = 0x01;
104 irda_qos_bits_to_value(qos);
105
106 /* irda thread waits 50 msec for power settling */
107
108 return 0;
109}
110
111static int old_belkin_close(struct sir_dev *dev)
112{
113 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
114
115 /* Power off dongle */
116 sirdev_set_dtr_rts(dev, FALSE, FALSE);
117
118 return 0;
119}
120
121/*
122 * Function old_belkin_change_speed (task)
123 *
124 * With only one speed available, not much to do...
125 */
126static int old_belkin_change_speed(struct sir_dev *dev, unsigned speed)
127{
128 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
129
130 dev->speed = 9600;
131 return (speed==dev->speed) ? 0 : -EINVAL;
132}
133
134/*
135 * Function old_belkin_reset (task)
136 *
137 * Reset the Old-Belkin type dongle.
138 *
139 */
140static int old_belkin_reset(struct sir_dev *dev)
141{
142 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
143
144 /* This dongle's speed "defaults" to 9600 bps ;-) */
145 dev->speed = 9600;
146
147 return 0;
148}
149
150MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>");
151MODULE_DESCRIPTION("Belkin (old) SmartBeam dongle driver");
152MODULE_LICENSE("GPL");
153MODULE_ALIAS("irda-dongle-7"); /* IRDA_OLD_BELKIN_DONGLE */
154
155module_init(old_belkin_sir_init);
156module_exit(old_belkin_sir_cleanup);
diff --git a/drivers/net/irda/old_belkin.c b/drivers/net/irda/old_belkin.c
new file mode 100644
index 000000000000..26f81fd28371
--- /dev/null
+++ b/drivers/net/irda/old_belkin.c
@@ -0,0 +1,164 @@
1/*********************************************************************
2 *
3 * Filename: old_belkin.c
4 * Version: 1.1
5 * Description: Driver for the Belkin (old) SmartBeam dongle
6 * Status: Experimental...
7 * Author: Jean Tourrilhes <jt@hpl.hp.com>
8 * Created at: 22/11/99
9 * Modified at: Fri Dec 17 09:13:32 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1999 Jean Tourrilhes, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
27 * MA 02111-1307 USA
28 *
29 ********************************************************************/
30
31#include <linux/module.h>
32#include <linux/delay.h>
33#include <linux/tty.h>
34#include <linux/init.h>
35
36#include <net/irda/irda.h>
37#include <net/irda/irda_device.h>
38
39/*
40 * Belkin is selling a dongle called the SmartBeam.
41 * In fact, there are two hardware versions of this dongle, of course with
42 * the same name and looking exactly the same (grrr...).
43 * I guess that I've got the old one, because inside I don't have
44 * a jumper for IrDA/ASK...
45 *
46 * As far as I can tell from the info on their web site, the old dongle
47 * supports only 9600 b/s, which makes our life much simpler as far as
48 * the driver is concerned, but you might not like it very much ;-)
49 * The new SmartBeam does 115 kb/s, and I've not tested it...
50 *
51 * Belkin claims that the correct driver for the old dongle (in Windows)
52 * is the generic Parallax 9500a driver, but the Linux LiteLink driver
53 * fails for me (probably because Linux-IrDA doesn't do rate fallback),
54 * so I created this really dumb driver...
55 *
56 * In fact, this driver doesn't do much. The only thing it does is to
57 * prevent Linux-IrDA from using any other speed than 9600 b/s ;-) This
58 * driver is called "old_belkin" so that when the new SmartBeam is supported
59 * its driver can be called "belkin" instead of "new_belkin".
60 *
61 * Note : this driver was written without any info/help from Belkin,
62 * so a lot of info here might be totally wrong. Blame me ;-)
63 */
64
65/* Let's guess */
66#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */
67
68static void old_belkin_open(dongle_t *self, struct qos_info *qos);
69static void old_belkin_close(dongle_t *self);
70static int old_belkin_change_speed(struct irda_task *task);
71static int old_belkin_reset(struct irda_task *task);
72
73/* These are the baudrates supported */
74/* static __u32 baud_rates[] = { 9600 }; */
75
76static struct dongle_reg dongle = {
77 .type = IRDA_OLD_BELKIN_DONGLE,
78 .open = old_belkin_open,
79 .close = old_belkin_close,
80 .reset = old_belkin_reset,
81 .change_speed = old_belkin_change_speed,
82 .owner = THIS_MODULE,
83};
84
85static int __init old_belkin_init(void)
86{
87 return irda_device_register_dongle(&dongle);
88}
89
90static void __exit old_belkin_cleanup(void)
91{
92 irda_device_unregister_dongle(&dongle);
93}
94
95static void old_belkin_open(dongle_t *self, struct qos_info *qos)
96{
97 /* Not too fast, please... */
98 qos->baud_rate.bits &= IR_9600;
99 /* Needs at least 10 ms (totally wild guess, can do probably better) */
100 qos->min_turn_time.bits = 0x01;
101}
102
103static void old_belkin_close(dongle_t *self)
104{
105 /* Power off dongle */
106 self->set_dtr_rts(self->dev, FALSE, FALSE);
107}
108
109/*
110 * Function old_belkin_change_speed (task)
111 *
112 * With only one speed available, not much to do...
113 */
114static int old_belkin_change_speed(struct irda_task *task)
115{
116 irda_task_next_state(task, IRDA_TASK_DONE);
117
118 return 0;
119}
120
121/*
122 * Function old_belkin_reset (task)
123 *
124 * Reset the Old-Belkin type dongle.
125 *
126 */
127static int old_belkin_reset(struct irda_task *task)
128{
129 dongle_t *self = (dongle_t *) task->instance;
130
131 /* Power on dongle */
132 self->set_dtr_rts(self->dev, TRUE, TRUE);
133
134 /* Sleep a minimum of 15 us */
135 udelay(MIN_DELAY);
136
137 /* This dongle's speed "defaults" to 9600 bps ;-) */
138 self->speed = 9600;
139
140 irda_task_next_state(task, IRDA_TASK_DONE);
141
142 return 0;
143}
144
145MODULE_AUTHOR("Jean Tourrilhes <jt@hpl.hp.com>");
146MODULE_DESCRIPTION("Belkin (old) SmartBeam dongle driver");
147MODULE_LICENSE("GPL");
148MODULE_ALIAS("irda-dongle-7"); /* IRDA_OLD_BELKIN_DONGLE */
149
150/*
151 * Function init_module (void)
152 *
153 * Initialize Old-Belkin module
154 *
155 */
156module_init(old_belkin_init);
157
158/*
159 * Function cleanup_module (void)
160 *
161 * Cleanup Old-Belkin module
162 *
163 */
164module_exit(old_belkin_cleanup);
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
new file mode 100644
index 000000000000..89f5096cab74
--- /dev/null
+++ b/drivers/net/irda/sa1100_ir.c
@@ -0,0 +1,1045 @@
1/*
2 * linux/drivers/net/irda/sa1100_ir.c
3 *
4 * Copyright (C) 2000-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Infra-red driver for the StrongARM SA1100 embedded microprocessor
11 *
12 * Note that we don't have to worry about the SA1111's DMA bugs in here,
13 * so we use the straightforward dma_map_* functions with a null pointer.
14 *
15 * This driver takes one kernel command line parameter, sa1100ir=, with
16 * the following options:
17 * max_rate:baudrate - set the maximum baud rate
18 * power_level:level - set the transmitter power level
19 * tx_lpm:0|1 - set transmit low power mode
20 */
21#include <linux/config.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/types.h>
25#include <linux/init.h>
26#include <linux/errno.h>
27#include <linux/netdevice.h>
28#include <linux/slab.h>
29#include <linux/rtnetlink.h>
30#include <linux/interrupt.h>
31#include <linux/delay.h>
32#include <linux/device.h>
33#include <linux/dma-mapping.h>
34
35#include <net/irda/irda.h>
36#include <net/irda/wrapper.h>
37#include <net/irda/irda_device.h>
38
39#include <asm/irq.h>
40#include <asm/dma.h>
41#include <asm/hardware.h>
42#include <asm/mach/irda.h>
43
44static int power_level = 3;
45static int tx_lpm;
46static int max_rate = 4000000;
47
48struct sa1100_irda {
49 unsigned char hscr0;
50 unsigned char utcr4;
51 unsigned char power;
52 unsigned char open;
53
54 int speed;
55 int newspeed;
56
57 struct sk_buff *txskb;
58 struct sk_buff *rxskb;
59 dma_addr_t txbuf_dma;
60 dma_addr_t rxbuf_dma;
61 dma_regs_t *txdma;
62 dma_regs_t *rxdma;
63
64 struct net_device_stats stats;
65 struct device *dev;
66 struct irda_platform_data *pdata;
67 struct irlap_cb *irlap;
68 struct qos_info qos;
69
70 iobuff_t tx_buff;
71 iobuff_t rx_buff;
72};
73
74#define IS_FIR(si) ((si)->speed >= 4000000)
75
76#define HPSIR_MAX_RXLEN 2047
77
78/*
79 * Allocate and map the receive buffer, unless it is already allocated.
80 */
81static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
82{
83 if (si->rxskb)
84 return 0;
85
86 si->rxskb = alloc_skb(HPSIR_MAX_RXLEN + 1, GFP_ATOMIC);
87
88 if (!si->rxskb) {
89 printk(KERN_ERR "sa1100_ir: out of memory for RX SKB\n");
90 return -ENOMEM;
91 }
92
93 /*
94 * Align any IP headers that may be contained
95 * within the frame.
96 */
97 skb_reserve(si->rxskb, 1);
98
99 si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data,
100 HPSIR_MAX_RXLEN,
101 DMA_FROM_DEVICE);
102 return 0;
103}
104
105/*
106 * We want to get here as soon as possible, and get the receiver setup.
107 * We use the existing buffer.
108 */
109static void sa1100_irda_rx_dma_start(struct sa1100_irda *si)
110{
111 if (!si->rxskb) {
112 printk(KERN_ERR "sa1100_ir: rx buffer went missing\n");
113 return;
114 }
115
116 /*
117 * First empty receive FIFO
118 */
119 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
120
121 /*
122 * Enable the DMA, receiver and receive interrupt.
123 */
124 sa1100_clear_dma(si->rxdma);
125 sa1100_start_dma(si->rxdma, si->rxbuf_dma, HPSIR_MAX_RXLEN);
126 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_RXE;
127}
128
129/*
130 * Set the IrDA communications speed.
131 */
132static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
133{
134 unsigned long flags;
135 int brd, ret = -EINVAL;
136
137 switch (speed) {
138 case 9600: case 19200: case 38400:
139 case 57600: case 115200:
140 brd = 3686400 / (16 * speed) - 1;
141
142 /*
143 * Stop the receive DMA.
144 */
145 if (IS_FIR(si))
146 sa1100_stop_dma(si->rxdma);
147
148 local_irq_save(flags);
149
150 Ser2UTCR3 = 0;
151 Ser2HSCR0 = HSCR0_UART;
152
153 Ser2UTCR1 = brd >> 8;
154 Ser2UTCR2 = brd;
155
156 /*
157 * Clear status register
158 */
159 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
160 Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
161
162 if (si->pdata->set_speed)
163 si->pdata->set_speed(si->dev, speed);
164
165 si->speed = speed;
166
167 local_irq_restore(flags);
168 ret = 0;
169 break;
170
171 case 4000000:
172 local_irq_save(flags);
173
174 si->hscr0 = 0;
175
176 Ser2HSSR0 = 0xff;
177 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
178 Ser2UTCR3 = 0;
179
180 si->speed = speed;
181
182 if (si->pdata->set_speed)
183 si->pdata->set_speed(si->dev, speed);
184
185 sa1100_irda_rx_alloc(si);
186 sa1100_irda_rx_dma_start(si);
187
188 local_irq_restore(flags);
189
190 break;
191
192 default:
193 break;
194 }
195
196 return ret;
197}
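/*
 * Editor's note: a worked example of the divisor formula above, assuming
 * the 3.6864 MHz UART clock implied by the 3686400 constant:
 *
 *	115200 baud: brd = 3686400 / (16 * 115200) - 1 = 2 - 1 = 1
 *	  9600 baud: brd = 3686400 / (16 *   9600) - 1 = 24 - 1 = 23
 */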
198
199/*
200 * Control the power state of the IrDA transmitter.
201 * State:
202 * 0 - off
203 * 1 - short range, lowest power
204 * 2 - medium range, medium power
205 * 3 - maximum range, high power
206 *
207 * Currently, only assabet is known to support this.
208 */
209static int
210__sa1100_irda_set_power(struct sa1100_irda *si, unsigned int state)
211{
212 int ret = 0;
213 if (si->pdata->set_power)
214 ret = si->pdata->set_power(si->dev, state);
215 return ret;
216}
217
218static inline int
219sa1100_set_power(struct sa1100_irda *si, unsigned int state)
220{
221 int ret;
222
223 ret = __sa1100_irda_set_power(si, state);
224 if (ret == 0)
225 si->power = state;
226
227 return ret;
228}
229
230static int sa1100_irda_startup(struct sa1100_irda *si)
231{
232 int ret;
233
234 /*
235 * Ensure that the ports for this device are setup correctly.
236 */
237 if (si->pdata->startup)
238 si->pdata->startup(si->dev);
239
240 /*
241 * Configure PPC for IRDA - we want to drive TXD2 low.
242 * We also want to drive this pin low during sleep.
243 */
244 PPSR &= ~PPC_TXD2;
245 PSDR &= ~PPC_TXD2;
246 PPDR |= PPC_TXD2;
247
248 /*
249 * Enable HP-SIR modulation, and ensure that the port is disabled.
250 */
251 Ser2UTCR3 = 0;
252 Ser2HSCR0 = HSCR0_UART;
253 Ser2UTCR4 = si->utcr4;
254 Ser2UTCR0 = UTCR0_8BitData;
255 Ser2HSCR2 = HSCR2_TrDataH | HSCR2_RcDataL;
256
257 /*
258 * Clear status register
259 */
260 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
261
262 ret = sa1100_irda_set_speed(si, si->speed = 9600);
263 if (ret) {
264 Ser2UTCR3 = 0;
265 Ser2HSCR0 = 0;
266
267 if (si->pdata->shutdown)
268 si->pdata->shutdown(si->dev);
269 }
270
271 return ret;
272}
273
274static void sa1100_irda_shutdown(struct sa1100_irda *si)
275{
276 /*
277 * Stop all DMA activity.
278 */
279 sa1100_stop_dma(si->rxdma);
280 sa1100_stop_dma(si->txdma);
281
282 /* Disable the port. */
283 Ser2UTCR3 = 0;
284 Ser2HSCR0 = 0;
285
286 if (si->pdata->shutdown)
287 si->pdata->shutdown(si->dev);
288}
289
290#ifdef CONFIG_PM
291/*
292 * Suspend the IrDA interface.
293 */
294static int sa1100_irda_suspend(struct device *_dev, u32 state, u32 level)
295{
296 struct net_device *dev = dev_get_drvdata(_dev);
297 struct sa1100_irda *si;
298
299 if (!dev || level != SUSPEND_DISABLE)
300 return 0;
301
302 si = dev->priv;
303 if (si->open) {
304 /*
305 * Stop the transmit queue
306 */
307 netif_device_detach(dev);
308 disable_irq(dev->irq);
309 sa1100_irda_shutdown(si);
310 __sa1100_irda_set_power(si, 0);
311 }
312
313 return 0;
314}
315
316/*
317 * Resume the IrDA interface.
318 */
319static int sa1100_irda_resume(struct device *_dev, u32 level)
320{
321 struct net_device *dev = dev_get_drvdata(_dev);
322 struct sa1100_irda *si;
323
324 if (!dev || level != RESUME_ENABLE)
325 return 0;
326
327 si = dev->priv;
328 if (si->open) {
329 /*
330 * If we missed a speed change, initialise at the new speed
331 * directly. It is debatable whether this is actually
332 * required, but in the interests of continuing from where
333 * we left off it is desirable. The converse argument is
334 * that we should re-negotiate at 9600 baud again.
335 */
336 if (si->newspeed) {
337 si->speed = si->newspeed;
338 si->newspeed = 0;
339 }
340
341 sa1100_irda_startup(si);
342 __sa1100_irda_set_power(si, si->power);
343 enable_irq(dev->irq);
344
345 /*
346 * This automatically wakes up the queue
347 */
348 netif_device_attach(dev);
349 }
350
351 return 0;
352}
353#else
354#define sa1100_irda_suspend NULL
355#define sa1100_irda_resume NULL
356#endif
357
358/*
359 * HP-SIR format interrupt service routines.
360 */
361static void sa1100_irda_hpsir_irq(struct net_device *dev)
362{
363 struct sa1100_irda *si = dev->priv;
364 int status;
365
366 status = Ser2UTSR0;
367
368 /*
369 * Deal with any receive errors first. The bytes in error may be
370 * the only bytes in the receive FIFO, so we do this first.
371 */
372 while (status & UTSR0_EIF) {
373 int stat, data;
374
375 stat = Ser2UTSR1;
376 data = Ser2UTDR;
377
378 if (stat & (UTSR1_FRE | UTSR1_ROR)) {
379 si->stats.rx_errors++;
380 if (stat & UTSR1_FRE)
381 si->stats.rx_frame_errors++;
382 if (stat & UTSR1_ROR)
383 si->stats.rx_fifo_errors++;
384 } else
385 async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
386
387 status = Ser2UTSR0;
388 }
389
390 /*
391 * We must clear certain bits.
392 */
393 Ser2UTSR0 = status & (UTSR0_RID | UTSR0_RBB | UTSR0_REB);
394
395 if (status & UTSR0_RFS) {
396 /*
397 * There are at least 4 bytes in the FIFO. Read 3 bytes
398 * and leave the rest to the block below.
399 */
400 async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
401 async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
402 async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
403 }
404
405 if (status & (UTSR0_RFS | UTSR0_RID)) {
406 /*
407 * Fifo contains more than 1 character.
408 */
409 do {
410 async_unwrap_char(dev, &si->stats, &si->rx_buff,
411 Ser2UTDR);
412 } while (Ser2UTSR1 & UTSR1_RNE);
413
414 dev->last_rx = jiffies;
415 }
416
417 if (status & UTSR0_TFS && si->tx_buff.len) {
418 /*
419 * Transmitter FIFO is not full
420 */
421 do {
422 Ser2UTDR = *si->tx_buff.data++;
423 si->tx_buff.len -= 1;
424 } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
425
426 if (si->tx_buff.len == 0) {
427 si->stats.tx_packets++;
428 si->stats.tx_bytes += si->tx_buff.data -
429 si->tx_buff.head;
430
431 /*
432 * We need to ensure that the transmitter has
433 * finished.
434 */
435 do
436 rmb();
437 while (Ser2UTSR1 & UTSR1_TBY);
438
439 /*
440 * Ok, we've finished transmitting. Now enable
441 * the receiver. Sometimes we get a receive IRQ
442 * immediately after a transmit...
443 */
444 Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
445 Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
446
447 if (si->newspeed) {
448 sa1100_irda_set_speed(si, si->newspeed);
449 si->newspeed = 0;
450 }
451
452 /* I'm hungry! */
453 netif_wake_queue(dev);
454 }
455 }
456}
457
458static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
459{
460 struct sk_buff *skb = si->rxskb;
461 dma_addr_t dma_addr;
462 unsigned int len, stat, data;
463
464 if (!skb) {
465 printk(KERN_ERR "sa1100_ir: SKB is NULL!\n");
466 return;
467 }
468
469 /*
470 * Get the current data position.
471 */
472 dma_addr = sa1100_get_dma_pos(si->rxdma);
473 len = dma_addr - si->rxbuf_dma;
474 if (len > HPSIR_MAX_RXLEN)
475 len = HPSIR_MAX_RXLEN;
476 dma_unmap_single(si->dev, si->rxbuf_dma, len, DMA_FROM_DEVICE);
477
478 do {
479 /*
480 * Read Status, and then Data.
481 */
482 stat = Ser2HSSR1;
483 rmb();
484 data = Ser2HSDR;
485
486 if (stat & (HSSR1_CRE | HSSR1_ROR)) {
487 si->stats.rx_errors++;
488 if (stat & HSSR1_CRE)
489 si->stats.rx_crc_errors++;
490 if (stat & HSSR1_ROR)
491 si->stats.rx_frame_errors++;
492 } else
493 skb->data[len++] = data;
494
495 /*
496 * If we hit the end of frame, there's
497 * no point in continuing.
498 */
499 if (stat & HSSR1_EOF)
500 break;
501 } while (Ser2HSSR0 & HSSR0_EIF);
502
503 if (stat & HSSR1_EOF) {
504 si->rxskb = NULL;
505
506 skb_put(skb, len);
507 skb->dev = dev;
508 skb->mac.raw = skb->data;
509 skb->protocol = htons(ETH_P_IRDA);
510 si->stats.rx_packets++;
511 si->stats.rx_bytes += len;
512
513 /*
514 * Before we pass the buffer up, allocate a new one.
515 */
516 sa1100_irda_rx_alloc(si);
517
518 netif_rx(skb);
519 dev->last_rx = jiffies;
520 } else {
521 /*
522 * Remap the buffer.
523 */
524 si->rxbuf_dma = dma_map_single(si->dev, si->rxskb->data,
525 HPSIR_MAX_RXLEN,
526 DMA_FROM_DEVICE);
527 }
528}
529
530/*
531 * FIR format interrupt service routine. We only have to
532 * handle RX events; transmit events go via the TX DMA handler.
533 *
534 * No matter what, we disable RX, process, and then restart RX.
535 */
536static void sa1100_irda_fir_irq(struct net_device *dev)
537{
538 struct sa1100_irda *si = dev->priv;
539
540 /*
541 * Stop RX DMA
542 */
543 sa1100_stop_dma(si->rxdma);
544
545 /*
546 * Framing error - we throw away the packet completely.
547 * Clearing RXE flushes the error conditions and data
548 * from the fifo.
549 */
550 if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
551 si->stats.rx_errors++;
552
553 if (Ser2HSSR0 & HSSR0_FRE)
554 si->stats.rx_frame_errors++;
555
556 /*
557 * Clear out the DMA...
558 */
559 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP;
560
561 /*
562 * Clear selected status bits now, so we
563 * don't miss them next time around.
564 */
565 Ser2HSSR0 = HSSR0_FRE | HSSR0_RAB;
566 }
567
568 /*
569 * Deal with any receive errors. Any of the lowest
570 * 8 bytes in the FIFO may contain an error. We must read
571 * them one by one. The "error" could even be the end of
572 * packet!
573 */
574 if (Ser2HSSR0 & HSSR0_EIF)
575 sa1100_irda_fir_error(si, dev);
576
577 /*
578 * No matter what happens, we must restart reception.
579 */
580 sa1100_irda_rx_dma_start(si);
581}
582
583static irqreturn_t sa1100_irda_irq(int irq, void *dev_id, struct pt_regs *regs)
584{
585 struct net_device *dev = dev_id;
586 if (IS_FIR(((struct sa1100_irda *)dev->priv)))
587 sa1100_irda_fir_irq(dev);
588 else
589 sa1100_irda_hpsir_irq(dev);
590 return IRQ_HANDLED;
591}
592
593/*
594 * TX DMA completion handler.
595 */
596static void sa1100_irda_txdma_irq(void *id)
597{
598 struct net_device *dev = id;
599 struct sa1100_irda *si = dev->priv;
600 struct sk_buff *skb = si->txskb;
601
602 si->txskb = NULL;
603
604 /*
605 * Wait for the transmission to complete. Unfortunately,
606 * the hardware doesn't give us an interrupt to indicate
607 * "end of frame".
608 */
609 do
610 rmb();
611 while (!(Ser2HSSR0 & HSSR0_TUR) || Ser2HSSR1 & HSSR1_TBY);
612
613 /*
614 * Clear the transmit underrun bit.
615 */
616 Ser2HSSR0 = HSSR0_TUR;
617
618 /*
619 * Do we need to change speed? Note that we're lazy
620 * here - we don't free the old rxskb. We don't need
621 * to allocate a buffer either.
622 */
623 if (si->newspeed) {
624 sa1100_irda_set_speed(si, si->newspeed);
625 si->newspeed = 0;
626 }
627
628 /*
629 * Start reception. This disables the transmitter for
630 * us. This will be using the existing RX buffer.
631 */
632 sa1100_irda_rx_dma_start(si);
633
634 /*
635 * Account and free the packet.
636 */
637 if (skb) {
638 dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE);
639 si->stats.tx_packets ++;
640 si->stats.tx_bytes += skb->len;
641 dev_kfree_skb_irq(skb);
642 }
643
644 /*
645 * Make sure that the TX queue is available for sending
646 * (for retries). TX has priority over RX at all times.
647 */
648 netif_wake_queue(dev);
649}
650
651static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
652{
653 struct sa1100_irda *si = dev->priv;
654 int speed = irda_get_next_speed(skb);
655
656 /*
657 * Does this packet contain a request to change the interface
658 * speed? If so, remember it until we complete the transmission
659 * of this frame.
660 */
661 if (speed != si->speed && speed != -1)
662 si->newspeed = speed;
663
664 /*
665 * If this is an empty frame, we can bypass a lot.
666 */
667 if (skb->len == 0) {
668 if (si->newspeed) {
669 si->newspeed = 0;
670 sa1100_irda_set_speed(si, speed);
671 }
672 dev_kfree_skb(skb);
673 return 0;
674 }
675
676 if (!IS_FIR(si)) {
677 netif_stop_queue(dev);
678
679 si->tx_buff.data = si->tx_buff.head;
680 si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
681 si->tx_buff.truesize);
682
683 /*
684 * Set the transmit interrupt enable. This will fire
685 * off an interrupt immediately. Note that we disable
687 * the receiver so we won't get spurious characters
687 * received.
688 */
689 Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
690
691 dev_kfree_skb(skb);
692 } else {
693 int mtt = irda_get_mtt(skb);
694
695 /*
696 * We must not be transmitting...
697 */
698 if (si->txskb)
699 BUG();
700
701 netif_stop_queue(dev);
702
703 si->txskb = skb;
704 si->txbuf_dma = dma_map_single(si->dev, skb->data,
705 skb->len, DMA_TO_DEVICE);
706
707 sa1100_start_dma(si->txdma, si->txbuf_dma, skb->len);
708
709 /*
710 * If we have a mean turn-around time, impose the specified
711 * delay. We could shorten this by timing from
712 * the point we received the packet.
713 */
714 if (mtt)
715 udelay(mtt);
716
717 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
718 }
719
720 dev->trans_start = jiffies;
721
722 return 0;
723}
724
725static int
726sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
727{
728 struct if_irda_req *rq = (struct if_irda_req *)ifreq;
729 struct sa1100_irda *si = dev->priv;
730 int ret = -EOPNOTSUPP;
731
732 switch (cmd) {
733 case SIOCSBANDWIDTH:
734 if (capable(CAP_NET_ADMIN)) {
735 /*
736 * We are unable to set the speed if the
737 * device is not running.
738 */
739 if (si->open) {
740 ret = sa1100_irda_set_speed(si,
741 rq->ifr_baudrate);
742 } else {
743 printk("sa1100_irda_ioctl: SIOCSBANDWIDTH: !netif_running\n");
744 ret = 0;
745 }
746 }
747 break;
748
749 case SIOCSMEDIABUSY:
750 ret = -EPERM;
751 if (capable(CAP_NET_ADMIN)) {
752 irda_device_set_media_busy(dev, TRUE);
753 ret = 0;
754 }
755 break;
756
757 case SIOCGRECEIVING:
758 rq->ifr_receiving = IS_FIR(si) ? 0
759 : si->rx_buff.state != OUTSIDE_FRAME;
760 break;
761
762 default:
763 break;
764 }
765
766 return ret;
767}
768
769static struct net_device_stats *sa1100_irda_stats(struct net_device *dev)
770{
771 struct sa1100_irda *si = dev->priv;
772 return &si->stats;
773}
774
775static int sa1100_irda_start(struct net_device *dev)
776{
777 struct sa1100_irda *si = dev->priv;
778 int err;
779
780 si->speed = 9600;
781
782 err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
783 if (err)
784 goto err_irq;
785
786 err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive",
787 NULL, NULL, &si->rxdma);
788 if (err)
789 goto err_rx_dma;
790
791 err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit",
792 sa1100_irda_txdma_irq, dev, &si->txdma);
793 if (err)
794 goto err_tx_dma;
795
796 /*
797 * The interrupt must remain disabled for now.
798 */
799 disable_irq(dev->irq);
800
801 /*
802 * Setup the serial port for the specified speed.
803 */
804 err = sa1100_irda_startup(si);
805 if (err)
806 goto err_startup;
807
808 /*
809 * Open a new IrLAP layer instance.
810 */
811 si->irlap = irlap_open(dev, &si->qos, "sa1100");
812 err = -ENOMEM;
813 if (!si->irlap)
814 goto err_irlap;
815
816 /*
817 * Now enable the interrupt and start the queue
818 */
819 si->open = 1;
820 sa1100_set_power(si, power_level); /* low power mode */
821 enable_irq(dev->irq);
822 netif_start_queue(dev);
823 return 0;
824
825err_irlap:
826 si->open = 0;
827 sa1100_irda_shutdown(si);
828err_startup:
829 sa1100_free_dma(si->txdma);
830err_tx_dma:
831 sa1100_free_dma(si->rxdma);
832err_rx_dma:
833 free_irq(dev->irq, dev);
834err_irq:
835 return err;
836}
837
838static int sa1100_irda_stop(struct net_device *dev)
839{
840 struct sa1100_irda *si = dev->priv;
841
842 disable_irq(dev->irq);
843 sa1100_irda_shutdown(si);
844
845 /*
846 * If we have been doing DMA receive, make sure we
847 * tidy that up cleanly.
848 */
849 if (si->rxskb) {
850 dma_unmap_single(si->dev, si->rxbuf_dma, HPSIR_MAX_RXLEN,
851 DMA_FROM_DEVICE);
852 dev_kfree_skb(si->rxskb);
853 si->rxskb = NULL;
854 }
855
856 /* Stop IrLAP */
857 if (si->irlap) {
858 irlap_close(si->irlap);
859 si->irlap = NULL;
860 }
861
862 netif_stop_queue(dev);
863 si->open = 0;
864
865 /*
866 * Free resources
867 */
868 sa1100_free_dma(si->txdma);
869 sa1100_free_dma(si->rxdma);
870 free_irq(dev->irq, dev);
871
872 sa1100_set_power(si, 0);
873
874 return 0;
875}
876
877static int sa1100_irda_init_iobuf(iobuff_t *io, int size)
878{
879 io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
880 if (io->head != NULL) {
881 io->truesize = size;
882 io->in_frame = FALSE;
883 io->state = OUTSIDE_FRAME;
884 io->data = io->head;
885 }
886 return io->head ? 0 : -ENOMEM;
887}
888
889static int sa1100_irda_probe(struct device *_dev)
890{
891 struct platform_device *pdev = to_platform_device(_dev);
892 struct net_device *dev;
893 struct sa1100_irda *si;
894 unsigned int baudrate_mask;
895 int err;
896
897 if (!pdev->dev.platform_data)
898 return -EINVAL;
899
900 err = request_mem_region(__PREG(Ser2UTCR0), 0x24, "IrDA") ? 0 : -EBUSY;
901 if (err)
902 goto err_mem_1;
903 err = request_mem_region(__PREG(Ser2HSCR0), 0x1c, "IrDA") ? 0 : -EBUSY;
904 if (err)
905 goto err_mem_2;
906 err = request_mem_region(__PREG(Ser2HSCR2), 0x04, "IrDA") ? 0 : -EBUSY;
907 if (err)
908 goto err_mem_3;
909
910 dev = alloc_irdadev(sizeof(struct sa1100_irda));
911 if (!dev)
912 goto err_mem_4;
913
914 si = dev->priv;
915 si->dev = &pdev->dev;
916 si->pdata = pdev->dev.platform_data;
917
918 /*
919 * Initialise the HP-SIR buffers
920 */
921 err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
922 if (err)
923 goto err_mem_5;
924 err = sa1100_irda_init_iobuf(&si->tx_buff, 4000);
925 if (err)
926 goto err_mem_5;
927
928 dev->hard_start_xmit = sa1100_irda_hard_xmit;
929 dev->open = sa1100_irda_start;
930 dev->stop = sa1100_irda_stop;
931 dev->do_ioctl = sa1100_irda_ioctl;
932 dev->get_stats = sa1100_irda_stats;
933 dev->irq = IRQ_Ser2ICP;
934
935 irda_init_max_qos_capabilies(&si->qos);
936
937 /*
938 * We support original IrDA up to 115k2, and 4 Mbps when max_rate
939 * permits it (which is the default). Min Turn Time set to 1 ms or greater.
940 */
941 baudrate_mask = IR_9600;
942
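	/*
	 * Editor's note: the switch below relies on deliberate fall-through;
	 * selecting e.g. max_rate = 115200 also enables 57600, 38400 and
	 * 19200 on top of the always-present 9600.
	 */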
943 switch (max_rate) {
944 case 4000000: baudrate_mask |= IR_4000000 << 8;
945 case 115200: baudrate_mask |= IR_115200;
946 case 57600: baudrate_mask |= IR_57600;
947 case 38400: baudrate_mask |= IR_38400;
948 case 19200: baudrate_mask |= IR_19200;
949 }
950
951 si->qos.baud_rate.bits &= baudrate_mask;
952 si->qos.min_turn_time.bits = 7;
953
954 irda_qos_bits_to_value(&si->qos);
955
956 si->utcr4 = UTCR4_HPSIR;
957 if (tx_lpm)
958 si->utcr4 |= UTCR4_Z1_6us;
959
960 /*
961 * Initially enable HP-SIR modulation, and ensure that the port
962 * is disabled.
963 */
964 Ser2UTCR3 = 0;
965 Ser2UTCR4 = si->utcr4;
966 Ser2HSCR0 = HSCR0_UART;
967
968 err = register_netdev(dev);
969 if (err == 0)
970 dev_set_drvdata(&pdev->dev, dev);
971
972 if (err) {
973 err_mem_5:
974 kfree(si->tx_buff.head);
975 kfree(si->rx_buff.head);
976 free_netdev(dev);
977 err_mem_4:
978 release_mem_region(__PREG(Ser2HSCR2), 0x04);
979 err_mem_3:
980 release_mem_region(__PREG(Ser2HSCR0), 0x1c);
981 err_mem_2:
982 release_mem_region(__PREG(Ser2UTCR0), 0x24);
983 }
984 err_mem_1:
985 return err;
986}
987
988static int sa1100_irda_remove(struct device *_dev)
989{
990 struct net_device *dev = dev_get_drvdata(_dev);
991
992 if (dev) {
993 struct sa1100_irda *si = dev->priv;
994 unregister_netdev(dev);
995 kfree(si->tx_buff.head);
996 kfree(si->rx_buff.head);
997 free_netdev(dev);
998 }
999
1000 release_mem_region(__PREG(Ser2HSCR2), 0x04);
1001 release_mem_region(__PREG(Ser2HSCR0), 0x1c);
1002 release_mem_region(__PREG(Ser2UTCR0), 0x24);
1003
1004 return 0;
1005}
1006
1007static struct device_driver sa1100ir_driver = {
1008 .name = "sa11x0-ir",
1009 .bus = &platform_bus_type,
1010 .probe = sa1100_irda_probe,
1011 .remove = sa1100_irda_remove,
1012 .suspend = sa1100_irda_suspend,
1013 .resume = sa1100_irda_resume,
1014};
1015
1016static int __init sa1100_irda_init(void)
1017{
1018 /*
1019 * Limit power level to a sensible range.
1020 */
1021 if (power_level < 1)
1022 power_level = 1;
1023 if (power_level > 3)
1024 power_level = 3;
1025
1026 return driver_register(&sa1100ir_driver);
1027}
1028
1029static void __exit sa1100_irda_exit(void)
1030{
1031 driver_unregister(&sa1100ir_driver);
1032}
1033
1034module_init(sa1100_irda_init);
1035module_exit(sa1100_irda_exit);
1036module_param(power_level, int, 0);
1037module_param(tx_lpm, int, 0);
1038module_param(max_rate, int, 0);
1039
1040MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
1041MODULE_DESCRIPTION("StrongARM SA1100 IrDA driver");
1042MODULE_LICENSE("GPL");
1043MODULE_PARM_DESC(power_level, "IrDA power level, 1 (low) to 3 (high)");
1044MODULE_PARM_DESC(tx_lpm, "Enable transmitter low power (1.6us) mode");
1045MODULE_PARM_DESC(max_rate, "Maximum baud rate (4000000, 115200, 57600, 38400, 19200, 9600)");
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
new file mode 100644
index 000000000000..f0b8bc3637e5
--- /dev/null
+++ b/drivers/net/irda/sir-dev.h
@@ -0,0 +1,202 @@
1/*********************************************************************
2 *
3 * sir-dev.h: include file for irda-sir device abstraction layer
4 *
5 * Copyright (c) 2002 Martin Diehl
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 *
12 ********************************************************************/
13
14#ifndef IRDA_SIR_H
15#define IRDA_SIR_H
16
17#include <linux/netdevice.h>
18
19#include <net/irda/irda.h>
20#include <net/irda/irda_device.h> // iobuff_t
21
22/* FIXME: unify irda_request with sir_fsm! */
23
24struct irda_request {
25 struct list_head lh_request;
26 unsigned long pending;
27 void (*func)(void *);
28 void *data;
29 struct timer_list timer;
30};
31
32struct sir_fsm {
33 struct semaphore sem;
34 struct irda_request rq;
35 unsigned state, substate;
36 int param;
37 int result;
38};
39
40#define SIRDEV_STATE_WAIT_TX_COMPLETE 0x0100
41
42/* substates for wait_tx_complete */
43#define SIRDEV_STATE_WAIT_XMIT 0x0101
44#define SIRDEV_STATE_WAIT_UNTIL_SENT 0x0102
45#define SIRDEV_STATE_TX_DONE 0x0103
46
47#define SIRDEV_STATE_DONGLE_OPEN 0x0300
48
49/* 0x0301-0x03ff reserved for individual dongle substates */
50
51#define SIRDEV_STATE_DONGLE_CLOSE 0x0400
52
53/* 0x0401-0x04ff reserved for individual dongle substates */
54
55#define SIRDEV_STATE_SET_DTR_RTS 0x0500
56
57#define SIRDEV_STATE_SET_SPEED 0x0700
58#define SIRDEV_STATE_DONGLE_CHECK 0x0800
59#define SIRDEV_STATE_DONGLE_RESET 0x0900
60
61/* 0x0901-0x09ff reserved for individual dongle substates */
62
63#define SIRDEV_STATE_DONGLE_SPEED 0x0a00
64/* 0x0a01-0x0aff reserved for individual dongle substates */
65
66#define SIRDEV_STATE_PORT_SPEED 0x0b00
67#define SIRDEV_STATE_DONE 0x0c00
68#define SIRDEV_STATE_ERROR 0x0d00
69#define SIRDEV_STATE_COMPLETE 0x0e00
70
71#define SIRDEV_STATE_DEAD 0xffff
72
73
74struct sir_dev;
75
76struct dongle_driver {
77
78 struct module *owner;
79
80 const char *driver_name;
81
82 IRDA_DONGLE type;
83
84 int (*open)(struct sir_dev *dev);
85 int (*close)(struct sir_dev *dev);
86 int (*reset)(struct sir_dev *dev);
87 int (*set_speed)(struct sir_dev *dev, unsigned speed);
88
89 struct list_head dongle_list;
90};
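/*
 * Editor's note: for a minimal example of filling in this operations table,
 * see the "old_belkin" dongle_driver instance in old_belkin-sir.c earlier
 * in this patch.
 */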
91
92struct sir_driver {
93
94 struct module *owner;
95
96 const char *driver_name;
97
98 int qos_mtt_bits;
99
100 int (*chars_in_buffer)(struct sir_dev *dev);
101 void (*wait_until_sent)(struct sir_dev *dev);
102 int (*set_speed)(struct sir_dev *dev, unsigned speed);
103 int (*set_dtr_rts)(struct sir_dev *dev, int dtr, int rts);
104
105 int (*do_write)(struct sir_dev *dev, const unsigned char *ptr, size_t len);
106
107 int (*start_dev)(struct sir_dev *dev);
108 int (*stop_dev)(struct sir_dev *dev);
109};
110
111
112/* exported */
113
114extern int irda_register_dongle(struct dongle_driver *new);
115extern int irda_unregister_dongle(struct dongle_driver *drv);
116
117extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
118extern int sirdev_put_instance(struct sir_dev *self);
119
120extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
121extern void sirdev_write_complete(struct sir_dev *dev);
122extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
123
124/* low level helpers for SIR device/dongle setup */
125extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
126extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
127extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
128
129/* not exported */
130
131extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
132extern int sirdev_put_dongle(struct sir_dev *self);
133
134extern void sirdev_enable_rx(struct sir_dev *dev);
135extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
136extern int __init irda_thread_create(void);
137extern void __exit irda_thread_join(void);
138
139/* inline helpers */
140
141static inline int sirdev_schedule_speed(struct sir_dev *dev, unsigned speed)
142{
143 return sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, speed);
144}
145
146static inline int sirdev_schedule_dongle_open(struct sir_dev *dev, int dongle_id)
147{
148 return sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_OPEN, dongle_id);
149}
150
151static inline int sirdev_schedule_dongle_close(struct sir_dev *dev)
152{
153 return sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_CLOSE, 0);
154}
155
156static inline int sirdev_schedule_dtr_rts(struct sir_dev *dev, int dtr, int rts)
157{
158 int dtrrts;
159
160 dtrrts = ((dtr) ? 0x02 : 0x00) | ((rts) ? 0x01 : 0x00);
161 return sirdev_schedule_request(dev, SIRDEV_STATE_SET_DTR_RTS, dtrrts);
162}
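/*
 * Editor's note: dtr/rts are packed into the low two bits of the request
 * parameter, so (dtr=1, rts=1) schedules SIRDEV_STATE_SET_DTR_RTS with
 * param 0x03, (dtr=1, rts=0) with 0x02 and (dtr=0, rts=1) with 0x01.
 */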
163
164#if 0
165static inline int sirdev_schedule_mode(struct sir_dev *dev, int mode)
166{
167 return sirdev_schedule_request(dev, SIRDEV_STATE_SET_MODE, mode);
168}
169#endif
170
171
172struct sir_dev {
173 struct net_device *netdev;
174 struct net_device_stats stats;
175
176 struct irlap_cb *irlap;
177
178 struct qos_info qos;
179
180 char hwname[32];
181
182 struct sir_fsm fsm;
183 atomic_t enable_rx;
184 int raw_tx;
185 spinlock_t tx_lock;
186
187 u32 new_speed;
188 u32 flags;
189
190 unsigned speed;
191
192 iobuff_t tx_buff; /* Transmit buffer */
193 iobuff_t rx_buff; /* Receive buffer */
194 struct sk_buff *tx_skb;
195
196 const struct dongle_driver * dongle_drv;
197 const struct sir_driver * drv;
198 void *priv;
199
200};
201
202#endif /* IRDA_SIR_H */
diff --git a/drivers/net/irda/sir_core.c b/drivers/net/irda/sir_core.c
new file mode 100644
index 000000000000..a49f910c835b
--- /dev/null
+++ b/drivers/net/irda/sir_core.c
@@ -0,0 +1,56 @@
1/*********************************************************************
2 *
3 * sir_core.c: module core for irda-sir abstraction layer
4 *
5 * Copyright (c) 2002 Martin Diehl
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 *
12 ********************************************************************/
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17
18#include <net/irda/irda.h>
19
20#include "sir-dev.h"
21
22/***************************************************************************/
23
24MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
25MODULE_DESCRIPTION("IrDA SIR core");
26MODULE_LICENSE("GPL");
27
28/***************************************************************************/
29
30EXPORT_SYMBOL(irda_register_dongle);
31EXPORT_SYMBOL(irda_unregister_dongle);
32
33EXPORT_SYMBOL(sirdev_get_instance);
34EXPORT_SYMBOL(sirdev_put_instance);
35
36EXPORT_SYMBOL(sirdev_set_dongle);
37EXPORT_SYMBOL(sirdev_write_complete);
38EXPORT_SYMBOL(sirdev_receive);
39
40EXPORT_SYMBOL(sirdev_raw_write);
41EXPORT_SYMBOL(sirdev_raw_read);
42EXPORT_SYMBOL(sirdev_set_dtr_rts);
43
44static int __init sir_core_init(void)
45{
46 return irda_thread_create();
47}
48
49static void __exit sir_core_exit(void)
50{
51 irda_thread_join();
52}
53
54module_init(sir_core_init);
55module_exit(sir_core_exit);
56
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
new file mode 100644
index 000000000000..efc5a8870565
--- /dev/null
+++ b/drivers/net/irda/sir_dev.c
@@ -0,0 +1,677 @@
1/*********************************************************************
2 *
3 * sir_dev.c: irda sir network device
4 *
5 * Copyright (c) 2002 Martin Diehl
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 *
12 ********************************************************************/
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/smp_lock.h>
18#include <linux/delay.h>
19
20#include <net/irda/irda.h>
21#include <net/irda/wrapper.h>
22#include <net/irda/irda_device.h>
23
24#include "sir-dev.h"
25
26/***************************************************************************/
27
28void sirdev_enable_rx(struct sir_dev *dev)
29{
30 if (unlikely(atomic_read(&dev->enable_rx)))
31 return;
32
33	/* flush rx-buffer - should also help in case of problems with echo cancellation */
34 dev->rx_buff.data = dev->rx_buff.head;
35 dev->rx_buff.len = 0;
36 dev->rx_buff.in_frame = FALSE;
37 dev->rx_buff.state = OUTSIDE_FRAME;
38 atomic_set(&dev->enable_rx, 1);
39}
40
41static int sirdev_is_receiving(struct sir_dev *dev)
42{
43 if (!atomic_read(&dev->enable_rx))
44 return 0;
45
46 return (dev->rx_buff.state != OUTSIDE_FRAME);
47}
48
49int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
50{
51 int err;
52
53 IRDA_DEBUG(3, "%s : requesting dongle %d.\n", __FUNCTION__, type);
54
55 err = sirdev_schedule_dongle_open(dev, type);
56 if (unlikely(err))
57 return err;
58 down(&dev->fsm.sem); /* block until config change completed */
59 err = dev->fsm.result;
60 up(&dev->fsm.sem);
61 return err;
62}
63
64/* used by dongle drivers for dongle programming */
65
66int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
67{
68 unsigned long flags;
69 int ret;
70
71 if (unlikely(len > dev->tx_buff.truesize))
72 return -ENOSPC;
73
74 spin_lock_irqsave(&dev->tx_lock, flags); /* serialize with other tx operations */
75 while (dev->tx_buff.len > 0) { /* wait until tx idle */
76 spin_unlock_irqrestore(&dev->tx_lock, flags);
77 msleep(10);
78 spin_lock_irqsave(&dev->tx_lock, flags);
79 }
80
81 dev->tx_buff.data = dev->tx_buff.head;
82 memcpy(dev->tx_buff.data, buf, len);
83 dev->tx_buff.len = len;
84
85 ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
86 if (ret > 0) {
87 IRDA_DEBUG(3, "%s(), raw-tx started\n", __FUNCTION__);
88
89 dev->tx_buff.data += ret;
90 dev->tx_buff.len -= ret;
91 dev->raw_tx = 1;
92 ret = len; /* all data is going to be sent */
93 }
94 spin_unlock_irqrestore(&dev->tx_lock, flags);
95 return ret;
96}
97
98/* seems some dongle drivers may need this */
99
100int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
101{
102 int count;
103
104 if (atomic_read(&dev->enable_rx))
105 return -EIO; /* fail if we expect irda-frames */
106
107 count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;
108
109 if (count > 0) {
110 memcpy(buf, dev->rx_buff.data, count);
111 dev->rx_buff.data += count;
112 dev->rx_buff.len -= count;
113 }
114
115 /* remaining stuff gets flushed when re-enabling normal rx */
116
117 return count;
118}
119
120int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
121{
122 int ret = -ENXIO;
123	if (dev->drv->set_dtr_rts)
124 ret = dev->drv->set_dtr_rts(dev, dtr, rts);
125 return ret;
126}
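
/* Illustrative sketch (not part of this driver): a hypothetical dongle
 * driver might use the raw helpers above to program its hardware roughly
 * as follows. It runs under control of the config state machine (kIrDAd
 * context), where sleeping is allowed and rx is disabled, so that
 * sirdev_raw_read() may be used. All example_* names and command bytes
 * are made up.
 */
#if 0
static int example_dongle_program(struct sir_dev *dev)
{
	static const char cmd[] = { 0x12, 0x34 };	/* made-up command bytes */
	char status;
	int ret;

	/* pulse the modem control lines to reset the (hypothetical) dongle */
	ret = sirdev_set_dtr_rts(dev, FALSE, TRUE);
	if (ret < 0)
		return ret;
	msleep(20);
	ret = sirdev_set_dtr_rts(dev, TRUE, TRUE);
	if (ret < 0)
		return ret;

	/* send the command bytes and read back a single status byte */
	ret = sirdev_raw_write(dev, cmd, sizeof(cmd));
	if (ret < 0)
		return ret;
	msleep(20);
	if (sirdev_raw_read(dev, &status, 1) != 1)
		return -EIO;	/* a real driver might poll or retry here */

	return 0;
}
#endif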
127
128/**********************************************************************/
129
130/* called from client driver - likely with bh-context - to indicate
131 * it made some progress with transmission. Hence we send the next
132 * chunk, if any, or complete the skb otherwise
133 */
134
135void sirdev_write_complete(struct sir_dev *dev)
136{
137 unsigned long flags;
138 struct sk_buff *skb;
139 int actual = 0;
140 int err;
141
142 spin_lock_irqsave(&dev->tx_lock, flags);
143
144 IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
145 __FUNCTION__, dev->tx_buff.len);
146
147 if (likely(dev->tx_buff.len > 0)) {
148 /* Write data left in transmit buffer */
149 actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
150
151 if (likely(actual>0)) {
152 dev->tx_buff.data += actual;
153 dev->tx_buff.len -= actual;
154 }
155 else if (unlikely(actual<0)) {
156 /* could be dropped later when we have tx_timeout to recover */
157 IRDA_ERROR("%s: drv->do_write failed (%d)\n",
158 __FUNCTION__, actual);
159 if ((skb=dev->tx_skb) != NULL) {
160 dev->tx_skb = NULL;
161 dev_kfree_skb_any(skb);
162 dev->stats.tx_errors++;
163 dev->stats.tx_dropped++;
164 }
165 dev->tx_buff.len = 0;
166 }
167 if (dev->tx_buff.len > 0)
168 goto done; /* more data to send later */
169 }
170
171 if (unlikely(dev->raw_tx != 0)) {
172		/* in raw mode we are simply done once the buffer has been sent
173 * completely. Since this was requested by some dongle driver
174 * running under the control of the irda-thread we must take
175 * care here not to re-enable the queue. The queue will be
176 * restarted when the irda-thread has completed the request.
177 */
178
179 IRDA_DEBUG(3, "%s(), raw-tx done\n", __FUNCTION__);
180 dev->raw_tx = 0;
181 goto done; /* no post-frame handling in raw mode */
182 }
183
184	/* We have now finished sending this skb:
185	 * update the statistics and free the skb.
186	 * Then check for and trigger a pending speed change, if any;
187	 * if there is none, switch back to rx mode and wake the queue
188	 * for further packets.
189	 * Note that a scheduled speed request blocks until the lower
190	 * client driver and the corresponding hardware have really
191	 * finished sending all data (e.g. the xmit fifo has drained)
192	 * before the speed change is finally applied and the queue
193	 * re-activated.
194	 */
195
196 IRDA_DEBUG(5, "%s(), finished with frame!\n", __FUNCTION__);
197
198 if ((skb=dev->tx_skb) != NULL) {
199 dev->tx_skb = NULL;
200 dev->stats.tx_packets++;
201 dev->stats.tx_bytes += skb->len;
202 dev_kfree_skb_any(skb);
203 }
204
205 if (unlikely(dev->new_speed > 0)) {
206 IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
207 err = sirdev_schedule_speed(dev, dev->new_speed);
208 if (unlikely(err)) {
209 /* should never happen
210 * forget the speed change and hope the stack recovers
211 */
212 IRDA_ERROR("%s - schedule speed change failed: %d\n",
213 __FUNCTION__, err);
214 netif_wake_queue(dev->netdev);
215 }
216 /* else: success
217 * speed change in progress now
218 * on completion dev->new_speed gets cleared,
219 * rx-reenabled and the queue restarted
220 */
221 }
222 else {
223 sirdev_enable_rx(dev);
224 netif_wake_queue(dev->netdev);
225 }
226
227done:
228 spin_unlock_irqrestore(&dev->tx_lock, flags);
229}
230
231/* called from client driver - likely with bh-context - to give us
232 * some more received bytes. We put them into the rx-buffer,
233 * normally unwrapping and building LAP-skb's (unless rx disabled)
234 */
235
236int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
237{
238 if (!dev || !dev->netdev) {
239 IRDA_WARNING("%s(), not ready yet!\n", __FUNCTION__);
240 return -1;
241 }
242
243 if (!dev->irlap) {
244 IRDA_WARNING("%s - too early: %p / %zd!\n",
245 __FUNCTION__, cp, count);
246 return -1;
247 }
248
249 if (cp==NULL) {
250 /* error already at lower level receive
251 * just update stats and set media busy
252 */
253 irda_device_set_media_busy(dev->netdev, TRUE);
254 dev->stats.rx_dropped++;
255 IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __FUNCTION__, count);
256 return 0;
257 }
258
259 /* Read the characters into the buffer */
260 if (likely(atomic_read(&dev->enable_rx))) {
261 while (count--)
262 /* Unwrap and destuff one byte */
263 async_unwrap_char(dev->netdev, &dev->stats,
264 &dev->rx_buff, *cp++);
265 } else {
266 while (count--) {
267 /* rx not enabled: save the raw bytes and never
268 * trigger any netif_rx. The received bytes are flushed
269 * later when we re-enable rx but might be read meanwhile
270 * by the dongle driver.
271 */
272 dev->rx_buff.data[dev->rx_buff.len++] = *cp++;
273
274 /* What should we do when the buffer is full? */
275 if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
276 dev->rx_buff.len = 0;
277 }
278 }
279
280 return 0;
281}
282
283/**********************************************************************/
284
285/* callbacks from network layer */
286
287static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
288{
289 struct sir_dev *dev = ndev->priv;
290
291 return (dev) ? &dev->stats : NULL;
292}
293
294static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
295{
296 struct sir_dev *dev = ndev->priv;
297 unsigned long flags;
298 int actual = 0;
299 int err;
300 s32 speed;
301
302 IRDA_ASSERT(dev != NULL, return 0;);
303
304 netif_stop_queue(ndev);
305
306 IRDA_DEBUG(3, "%s(), skb->len = %d\n", __FUNCTION__, skb->len);
307
308 speed = irda_get_next_speed(skb);
309 if ((speed != dev->speed) && (speed != -1)) {
310 if (!skb->len) {
311 err = sirdev_schedule_speed(dev, speed);
312 if (unlikely(err == -EWOULDBLOCK)) {
313 /* Failed to initiate the speed change, likely the fsm
314 * is still busy (pretty unlikely, but...)
315 * We refuse to accept the skb and return with the queue
316 * stopped so the network layer will retry after the
317 * fsm completes and wakes the queue.
318 */
319 return 1;
320 }
321 else if (unlikely(err)) {
322 /* other fatal error - forget the speed change and
323 * hope the stack will recover somehow
324 */
325 netif_start_queue(ndev);
326 }
327 /* else: success
328 * speed change in progress now
329 * on completion the queue gets restarted
330 */
331
332 dev_kfree_skb_any(skb);
333 return 0;
334 } else
335 dev->new_speed = speed;
336 }
337
338	/* Init tx buffer */
339 dev->tx_buff.data = dev->tx_buff.head;
340
341	/* Diagnostic: a previous write should normally have completed by now */
342 if(spin_is_locked(&dev->tx_lock)) {
343 IRDA_DEBUG(3, "%s(), write not completed\n", __FUNCTION__);
344 }
345
346 /* serialize with write completion */
347 spin_lock_irqsave(&dev->tx_lock, flags);
348
349 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
350 dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);
351
352 /* transmission will start now - disable receive.
353	 * If we happen to be in the middle of an incoming frame,
354	 * treat it as a collision. It would probably also be a good
355	 * idea to reset the rx_buff to OUTSIDE_FRAME in this case.
356 */
357 atomic_set(&dev->enable_rx, 0);
358 if (unlikely(sirdev_is_receiving(dev)))
359 dev->stats.collisions++;
360
361 actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
362
363 if (likely(actual > 0)) {
364 dev->tx_skb = skb;
365 ndev->trans_start = jiffies;
366 dev->tx_buff.data += actual;
367 dev->tx_buff.len -= actual;
368 }
369 else if (unlikely(actual < 0)) {
370 /* could be dropped later when we have tx_timeout to recover */
371 IRDA_ERROR("%s: drv->do_write failed (%d)\n",
372 __FUNCTION__, actual);
373 dev_kfree_skb_any(skb);
374 dev->stats.tx_errors++;
375 dev->stats.tx_dropped++;
376 netif_wake_queue(ndev);
377 }
378 spin_unlock_irqrestore(&dev->tx_lock, flags);
379
380 return 0;
381}
382
383/* called from the network layer with the rtnl lock held */
384
385static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
386{
387 struct if_irda_req *irq = (struct if_irda_req *) rq;
388 struct sir_dev *dev = ndev->priv;
389 int ret = 0;
390
391 IRDA_ASSERT(dev != NULL, return -1;);
392
393 IRDA_DEBUG(3, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, ndev->name, cmd);
394
395 switch (cmd) {
396 case SIOCSBANDWIDTH: /* Set bandwidth */
397 if (!capable(CAP_NET_ADMIN))
398 ret = -EPERM;
399 else
400 ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
401 /* cannot sleep here for completion
402		 * we are called from the network layer with the rtnl lock held
403 */
404 break;
405
406 case SIOCSDONGLE: /* Set dongle */
407 if (!capable(CAP_NET_ADMIN))
408 ret = -EPERM;
409 else
410 ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
411 /* cannot sleep here for completion
412		 * we are called from the network layer with the rtnl lock held
413 */
414 break;
415
416 case SIOCSMEDIABUSY: /* Set media busy */
417 if (!capable(CAP_NET_ADMIN))
418 ret = -EPERM;
419 else
420 irda_device_set_media_busy(dev->netdev, TRUE);
421 break;
422
423 case SIOCGRECEIVING: /* Check if we are receiving right now */
424 irq->ifr_receiving = sirdev_is_receiving(dev);
425 break;
426
427 case SIOCSDTRRTS:
428 if (!capable(CAP_NET_ADMIN))
429 ret = -EPERM;
430 else
431 ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
432 /* cannot sleep here for completion
433		 * we are called from the network layer with the rtnl lock held
434 */
435 break;
436
437 case SIOCSMODE:
438#if 0
439 if (!capable(CAP_NET_ADMIN))
440 ret = -EPERM;
441 else
442 ret = sirdev_schedule_mode(dev, irq->ifr_mode);
443 /* cannot sleep here for completion
444		 * we are called from the network layer with the rtnl lock held
445 */
446 break;
447#endif
448 default:
449 ret = -EOPNOTSUPP;
450 }
451
452 return ret;
453}
454
455/* ----------------------------------------------------------------------------- */
456
457#define SIRBUF_ALLOCSIZE 4269 /* worst case size of a wrapped IrLAP frame */
458
459static int sirdev_alloc_buffers(struct sir_dev *dev)
460{
461 dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
462 dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;
463
464 /* Bootstrap ZeroCopy Rx */
465 dev->rx_buff.skb = __dev_alloc_skb(dev->rx_buff.truesize, GFP_KERNEL);
466 if (dev->rx_buff.skb == NULL)
467 return -ENOMEM;
468 skb_reserve(dev->rx_buff.skb, 1);
469 dev->rx_buff.head = dev->rx_buff.skb->data;
470
471 dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
472 if (dev->tx_buff.head == NULL) {
473 kfree_skb(dev->rx_buff.skb);
474 dev->rx_buff.skb = NULL;
475 dev->rx_buff.head = NULL;
476 return -ENOMEM;
477 }
478
479 dev->tx_buff.data = dev->tx_buff.head;
480 dev->rx_buff.data = dev->rx_buff.head;
481 dev->tx_buff.len = 0;
482 dev->rx_buff.len = 0;
483
484 dev->rx_buff.in_frame = FALSE;
485 dev->rx_buff.state = OUTSIDE_FRAME;
486 return 0;
487}
488
489static void sirdev_free_buffers(struct sir_dev *dev)
490{
491 if (dev->rx_buff.skb)
492 kfree_skb(dev->rx_buff.skb);
493 if (dev->tx_buff.head)
494 kfree(dev->tx_buff.head);
495 dev->rx_buff.head = dev->tx_buff.head = NULL;
496 dev->rx_buff.skb = NULL;
497}
498
499static int sirdev_open(struct net_device *ndev)
500{
501 struct sir_dev *dev = ndev->priv;
502 const struct sir_driver *drv = dev->drv;
503
504 if (!drv)
505 return -ENODEV;
506
507 /* increase the reference count of the driver module before doing serious stuff */
508 if (!try_module_get(drv->owner))
509 return -ESTALE;
510
511 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
512
513 if (sirdev_alloc_buffers(dev))
514 goto errout_dec;
515
516 if (!dev->drv->start_dev || dev->drv->start_dev(dev))
517 goto errout_free;
518
519 sirdev_enable_rx(dev);
520 dev->raw_tx = 0;
521
522 netif_start_queue(ndev);
523 dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
524 if (!dev->irlap)
525 goto errout_stop;
526
527 netif_wake_queue(ndev);
528
529 IRDA_DEBUG(2, "%s - done, speed = %d\n", __FUNCTION__, dev->speed);
530
531 return 0;
532
533errout_stop:
534 atomic_set(&dev->enable_rx, 0);
535 if (dev->drv->stop_dev)
536 dev->drv->stop_dev(dev);
537errout_free:
538 sirdev_free_buffers(dev);
539errout_dec:
540 module_put(drv->owner);
541 return -EAGAIN;
542}
543
544static int sirdev_close(struct net_device *ndev)
545{
546 struct sir_dev *dev = ndev->priv;
547 const struct sir_driver *drv;
548
549// IRDA_DEBUG(0, "%s\n", __FUNCTION__);
550
551 netif_stop_queue(ndev);
552
553 down(&dev->fsm.sem); /* block on pending config completion */
554
555 atomic_set(&dev->enable_rx, 0);
556
557 if (unlikely(!dev->irlap))
558 goto out;
559 irlap_close(dev->irlap);
560 dev->irlap = NULL;
561
562 drv = dev->drv;
563 if (unlikely(!drv || !dev->priv))
564 goto out;
565
566 if (drv->stop_dev)
567 drv->stop_dev(dev);
568
569 sirdev_free_buffers(dev);
570 module_put(drv->owner);
571
572out:
573 dev->speed = 0;
574 up(&dev->fsm.sem);
575 return 0;
576}
577
578/* ----------------------------------------------------------------------------- */
579
580struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
581{
582 struct net_device *ndev;
583 struct sir_dev *dev;
584
585 IRDA_DEBUG(0, "%s - %s\n", __FUNCTION__, name);
586
587 /* instead of adding tests to protect against drv->do_write==NULL
588	 * in several places, we refuse to create a sir_dev instance for
589 * drivers which don't implement do_write.
590 */
591 if (!drv || !drv->do_write)
592 return NULL;
593
594 /*
595 * Allocate new instance of the device
596 */
597 ndev = alloc_irdadev(sizeof(*dev));
598 if (ndev == NULL) {
599 IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __FUNCTION__);
600 goto out;
601 }
602 dev = ndev->priv;
603
604 irda_init_max_qos_capabilies(&dev->qos);
605 dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
606 dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
607 irda_qos_bits_to_value(&dev->qos);
608
609 strncpy(dev->hwname, name, sizeof(dev->hwname)-1);
610
611 atomic_set(&dev->enable_rx, 0);
612 dev->tx_skb = NULL;
613
614 spin_lock_init(&dev->tx_lock);
615 init_MUTEX(&dev->fsm.sem);
616
617 INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
618 dev->fsm.rq.pending = 0;
619 init_timer(&dev->fsm.rq.timer);
620
621 dev->drv = drv;
622 dev->netdev = ndev;
623
624 SET_MODULE_OWNER(ndev);
625
626 /* Override the network functions we need to use */
627 ndev->hard_start_xmit = sirdev_hard_xmit;
628 ndev->open = sirdev_open;
629 ndev->stop = sirdev_close;
630 ndev->get_stats = sirdev_get_stats;
631 ndev->do_ioctl = sirdev_ioctl;
632
633 if (register_netdev(ndev)) {
634 IRDA_ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
635 goto out_freenetdev;
636 }
637
638 return dev;
639
640out_freenetdev:
641 free_netdev(ndev);
642out:
643 return NULL;
644}
645
646int sirdev_put_instance(struct sir_dev *dev)
647{
648 int err = 0;
649
650 IRDA_DEBUG(0, "%s\n", __FUNCTION__);
651
652 atomic_set(&dev->enable_rx, 0);
653
654 netif_carrier_off(dev->netdev);
655 netif_device_detach(dev->netdev);
656
657 if (dev->dongle_drv)
658 err = sirdev_schedule_dongle_close(dev);
659 if (err)
660 IRDA_ERROR("%s - error %d\n", __FUNCTION__, err);
661
662 sirdev_close(dev->netdev);
663
664 down(&dev->fsm.sem);
665	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark stale */
666 dev->dongle_drv = NULL;
667 dev->priv = NULL;
668 up(&dev->fsm.sem);
669
670 /* Remove netdevice */
671 unregister_netdev(dev->netdev);
672
673 free_netdev(dev->netdev);
674
675 return 0;
676}
677
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c
new file mode 100644
index 000000000000..c5b76746e72b
--- /dev/null
+++ b/drivers/net/irda/sir_dongle.c
@@ -0,0 +1,134 @@
1/*********************************************************************
2 *
3 * sir_dongle.c: manager for serial dongle protocol drivers
4 *
5 * Copyright (c) 2002 Martin Diehl
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 *
12 ********************************************************************/
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/smp_lock.h>
18#include <linux/kmod.h>
19
20#include <net/irda/irda.h>
21
22#include "sir-dev.h"
23
24/**************************************************************************
25 *
26 * dongle registration and attachment
27 *
28 */
29
30static LIST_HEAD(dongle_list); /* list of registered dongle drivers */
31static DECLARE_MUTEX(dongle_list_lock); /* protects the list */
32
33int irda_register_dongle(struct dongle_driver *new)
34{
35 struct list_head *entry;
36 struct dongle_driver *drv;
37
38 IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
39 __FUNCTION__, new->driver_name, new->type);
40
41 down(&dongle_list_lock);
42 list_for_each(entry, &dongle_list) {
43 drv = list_entry(entry, struct dongle_driver, dongle_list);
44 if (new->type == drv->type) {
45 up(&dongle_list_lock);
46 return -EEXIST;
47 }
48 }
49 list_add(&new->dongle_list, &dongle_list);
50 up(&dongle_list_lock);
51 return 0;
52}
53
54int irda_unregister_dongle(struct dongle_driver *drv)
55{
56 down(&dongle_list_lock);
57 list_del(&drv->dongle_list);
58 up(&dongle_list_lock);
59 return 0;
60}
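
/* Illustrative sketch (not part of this file): a dongle protocol driver
 * typically fills in a struct dongle_driver and registers it from its
 * module init/exit code. All example_* names and the dongle type id are
 * hypothetical; only fields actually used by this layer are shown.
 */
#if 0
static struct dongle_driver example_dongle = {
	.owner		= THIS_MODULE,
	.driver_name	= "Example dongle",
	.type		= EXAMPLE_DONGLE_ID,	/* some IRDA_DONGLE value */
	.open		= example_dongle_open,
	.close		= example_dongle_close,
	.reset		= example_dongle_reset,
	.set_speed	= example_dongle_set_speed,
};

static int __init example_dongle_init(void)
{
	return irda_register_dongle(&example_dongle);
}

static void __exit example_dongle_cleanup(void)
{
	irda_unregister_dongle(&example_dongle);
}

module_init(example_dongle_init);
module_exit(example_dongle_cleanup);
#endif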
61
62int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
63{
64 struct list_head *entry;
65 const struct dongle_driver *drv = NULL;
66 int err = -EINVAL;
67
68#ifdef CONFIG_KMOD
69 request_module("irda-dongle-%d", type);
70#endif
71
72 if (dev->dongle_drv != NULL)
73 return -EBUSY;
74
75 /* serialize access to the list of registered dongles */
76 down(&dongle_list_lock);
77
78 list_for_each(entry, &dongle_list) {
79 drv = list_entry(entry, struct dongle_driver, dongle_list);
80 if (drv->type == type)
81 break;
82 else
83 drv = NULL;
84 }
85
86 if (!drv) {
87 err = -ENODEV;
88 goto out_unlock; /* no such dongle */
89 }
90
91 /* handling of SMP races with dongle module removal - three cases:
92	 * 1) the dongle driver was already unregistered - then we won't have found
93	 *    the requested dongle above and will already have bailed out here
94 * 2) the module is already marked deleted but the driver is still
95 * registered - then the try_module_get() below will fail
96 * 3) the try_module_get() below succeeds before the module is marked
97 * deleted - then sys_delete_module() fails and prevents the removal
98 * because the module is in use.
99 */
100
101 if (!try_module_get(drv->owner)) {
102 err = -ESTALE;
103 goto out_unlock; /* rmmod already pending */
104 }
105 dev->dongle_drv = drv;
106
107 if (!drv->open || (err=drv->open(dev))!=0)
108 goto out_reject; /* failed to open driver */
109
110 up(&dongle_list_lock);
111 return 0;
112
113out_reject:
114 dev->dongle_drv = NULL;
115 module_put(drv->owner);
116out_unlock:
117 up(&dongle_list_lock);
118 return err;
119}
120
121int sirdev_put_dongle(struct sir_dev *dev)
122{
123 const struct dongle_driver *drv = dev->dongle_drv;
124
125 if (drv) {
126 if (drv->close)
127 drv->close(dev); /* close this dongle instance */
128
129 dev->dongle_drv = NULL; /* unlink the dongle driver */
130 module_put(drv->owner);/* decrement driver's module refcount */
131 }
132
133 return 0;
134}
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
new file mode 100644
index 000000000000..18cea1099530
--- /dev/null
+++ b/drivers/net/irda/sir_kthread.c
@@ -0,0 +1,502 @@
1/*********************************************************************
2 *
3 * sir_kthread.c: dedicated thread to process scheduled
4 * sir device setup requests
5 *
6 * Copyright (c) 2002 Martin Diehl
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 ********************************************************************/
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/version.h>
18#include <linux/init.h>
19#include <linux/smp_lock.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
22
23#include <net/irda/irda.h>
24
25#include "sir-dev.h"
26
27/**************************************************************************
28 *
29 * kIrDAd kernel thread and config state machine
30 *
31 */
32
33struct irda_request_queue {
34 struct list_head request_list;
35 spinlock_t lock;
36 task_t *thread;
37 struct completion exit;
38 wait_queue_head_t kick, done;
39 atomic_t num_pending;
40};
41
42static struct irda_request_queue irda_rq_queue;
43
44static int irda_queue_request(struct irda_request *rq)
45{
46 int ret = 0;
47 unsigned long flags;
48
49 if (!test_and_set_bit(0, &rq->pending)) {
50 spin_lock_irqsave(&irda_rq_queue.lock, flags);
51 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
52 wake_up(&irda_rq_queue.kick);
53 atomic_inc(&irda_rq_queue.num_pending);
54 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
55 ret = 1;
56 }
57 return ret;
58}
59
60static void irda_request_timer(unsigned long data)
61{
62 struct irda_request *rq = (struct irda_request *)data;
63 unsigned long flags;
64
65 spin_lock_irqsave(&irda_rq_queue.lock, flags);
66 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
67 wake_up(&irda_rq_queue.kick);
68 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
69}
70
71static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
72{
73 int ret = 0;
74 struct timer_list *timer = &rq->timer;
75
76 if (!test_and_set_bit(0, &rq->pending)) {
77 timer->expires = jiffies + delay;
78 timer->function = irda_request_timer;
79 timer->data = (unsigned long)rq;
80 atomic_inc(&irda_rq_queue.num_pending);
81 add_timer(timer);
82 ret = 1;
83 }
84 return ret;
85}
86
87static void run_irda_queue(void)
88{
89 unsigned long flags;
90 struct list_head *entry, *tmp;
91 struct irda_request *rq;
92
93 spin_lock_irqsave(&irda_rq_queue.lock, flags);
94 list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
95 rq = list_entry(entry, struct irda_request, lh_request);
96 list_del_init(entry);
97 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
98
99 clear_bit(0, &rq->pending);
100 rq->func(rq->data);
101
102 if (atomic_dec_and_test(&irda_rq_queue.num_pending))
103 wake_up(&irda_rq_queue.done);
104
105 spin_lock_irqsave(&irda_rq_queue.lock, flags);
106 }
107 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
108}
109
110static int irda_thread(void *startup)
111{
112 DECLARE_WAITQUEUE(wait, current);
113
114 daemonize("kIrDAd");
115
116 irda_rq_queue.thread = current;
117
118 complete((struct completion *)startup);
119
120 while (irda_rq_queue.thread != NULL) {
121
122 /* We use TASK_INTERRUPTIBLE, rather than
123 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
124		 * change; he told me that it is safe because "signal
125		 * blocking is now handled in daemonize()". He added
126		 * that the problem is that "uninterruptible sleep
127		 * contributes to load average", which makes users worry.
128 * Jean II */
129 set_task_state(current, TASK_INTERRUPTIBLE);
130 add_wait_queue(&irda_rq_queue.kick, &wait);
131 if (list_empty(&irda_rq_queue.request_list))
132 schedule();
133 else
134 __set_task_state(current, TASK_RUNNING);
135 remove_wait_queue(&irda_rq_queue.kick, &wait);
136
137 /* make swsusp happy with our thread */
138 if (current->flags & PF_FREEZE)
139 refrigerator(PF_FREEZE);
140
141 run_irda_queue();
142 }
143
144#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
145 reparent_to_init();
146#endif
147 complete_and_exit(&irda_rq_queue.exit, 0);
148 /* never reached */
149 return 0;
150}
151
152
153static void flush_irda_queue(void)
154{
155 if (atomic_read(&irda_rq_queue.num_pending)) {
156
157 DECLARE_WAITQUEUE(wait, current);
158
159 if (!list_empty(&irda_rq_queue.request_list))
160 run_irda_queue();
161
162 set_task_state(current, TASK_UNINTERRUPTIBLE);
163 add_wait_queue(&irda_rq_queue.done, &wait);
164 if (atomic_read(&irda_rq_queue.num_pending))
165 schedule();
166 else
167 __set_task_state(current, TASK_RUNNING);
168 remove_wait_queue(&irda_rq_queue.done, &wait);
169 }
170}
171
172/* substate handler of the config-fsm to handle the cases where we want
173 * to wait for transmit completion before changing the port configuration
174 */
175
176static int irda_tx_complete_fsm(struct sir_dev *dev)
177{
178 struct sir_fsm *fsm = &dev->fsm;
179 unsigned next_state, delay;
180 unsigned bytes_left;
181
182 do {
183 next_state = fsm->substate; /* default: stay in current substate */
184 delay = 0;
185
186 switch(fsm->substate) {
187
188 case SIRDEV_STATE_WAIT_XMIT:
189 if (dev->drv->chars_in_buffer)
190 bytes_left = dev->drv->chars_in_buffer(dev);
191 else
192 bytes_left = 0;
193 if (!bytes_left) {
194 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
195 break;
196 }
197
198 if (dev->speed > 115200)
199 delay = (bytes_left*8*10000) / (dev->speed/100);
200 else if (dev->speed > 0)
201 delay = (bytes_left*10*10000) / (dev->speed/100);
202 else
203 delay = 0;
204 /* expected delay (usec) until remaining bytes are sent */
205 if (delay < 100) {
206 udelay(delay);
207 delay = 0;
208 break;
209 }
210			/* for longer delays, sleep (msec) instead of busy-waiting */
211 delay = (delay+999) / 1000;
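			/* Worked example: at 115200 baud (SIR, ~10 bits per byte
			 * on the wire) with 64 bytes still buffered, the formula
			 * above gives 64*10*10000/(115200/100) ~= 5555 usec;
			 * being >= 100 usec it is not busy-waited but rounded up
			 * to a 6 msec delay, handled via the request timer.
			 */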
212 break;
213
214 case SIRDEV_STATE_WAIT_UNTIL_SENT:
215			/* block until the underlying hardware buffers are empty */
216 if (dev->drv->wait_until_sent)
217 dev->drv->wait_until_sent(dev);
218 next_state = SIRDEV_STATE_TX_DONE;
219 break;
220
221 case SIRDEV_STATE_TX_DONE:
222 return 0;
223
224 default:
225 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
226 return -EINVAL;
227 }
228 fsm->substate = next_state;
229 } while (delay == 0);
230 return delay;
231}
232
233/*
234 * Function irda_config_fsm
235 *
236 * State machine to handle the configuration of the device (and attached dongle, if any).
237 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
238 * However, kIrDAd is shared by all sir_dev devices, so we should not sleep there too
239 * long. Instead, for longer delays we start a timer to reschedule us later.
240 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
241 * Both must be unlocked/restarted on completion - but only on final exit.
242 */
243
244static void irda_config_fsm(void *data)
245{
246 struct sir_dev *dev = data;
247 struct sir_fsm *fsm = &dev->fsm;
248 int next_state;
249 int ret = -1;
250 unsigned delay;
251
252 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
253
254 do {
255 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
256 __FUNCTION__, fsm->state, fsm->substate);
257
258 next_state = fsm->state;
259 delay = 0;
260
261 switch(fsm->state) {
262
263 case SIRDEV_STATE_DONGLE_OPEN:
264 if (dev->dongle_drv != NULL) {
265 ret = sirdev_put_dongle(dev);
266 if (ret) {
267 fsm->result = -EINVAL;
268 next_state = SIRDEV_STATE_ERROR;
269 break;
270 }
271 }
272
273 /* Initialize dongle */
274 ret = sirdev_get_dongle(dev, fsm->param);
275 if (ret) {
276 fsm->result = ret;
277 next_state = SIRDEV_STATE_ERROR;
278 break;
279 }
280
281 /* Dongles are powered through the modem control lines which
282 * were just set during open. Before resetting, let's wait for
283 * the power to stabilize. This is what some dongle drivers did
284 * in open before, while others didn't - should be safe anyway.
285 */
286
287 delay = 50;
288 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
289 next_state = SIRDEV_STATE_DONGLE_RESET;
290
291 fsm->param = 9600;
292
293 break;
294
295 case SIRDEV_STATE_DONGLE_CLOSE:
296			/* shouldn't we just treat this as success? */
297 if (dev->dongle_drv == NULL) {
298 fsm->result = -EINVAL;
299 next_state = SIRDEV_STATE_ERROR;
300 break;
301 }
302
303 ret = sirdev_put_dongle(dev);
304 if (ret) {
305 fsm->result = ret;
306 next_state = SIRDEV_STATE_ERROR;
307 break;
308 }
309 next_state = SIRDEV_STATE_DONE;
310 break;
311
312 case SIRDEV_STATE_SET_DTR_RTS:
313 ret = sirdev_set_dtr_rts(dev,
314 (fsm->param&0x02) ? TRUE : FALSE,
315 (fsm->param&0x01) ? TRUE : FALSE);
316 next_state = SIRDEV_STATE_DONE;
317 break;
318
319 case SIRDEV_STATE_SET_SPEED:
320 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
321 next_state = SIRDEV_STATE_DONGLE_CHECK;
322 break;
323
324 case SIRDEV_STATE_DONGLE_CHECK:
325 ret = irda_tx_complete_fsm(dev);
326 if (ret < 0) {
327 fsm->result = ret;
328 next_state = SIRDEV_STATE_ERROR;
329 break;
330 }
331 if ((delay=ret) != 0)
332 break;
333
334 if (dev->dongle_drv) {
335 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
336 next_state = SIRDEV_STATE_DONGLE_RESET;
337 }
338 else {
339 dev->speed = fsm->param;
340 next_state = SIRDEV_STATE_PORT_SPEED;
341 }
342 break;
343
344 case SIRDEV_STATE_DONGLE_RESET:
345 if (dev->dongle_drv->reset) {
346 ret = dev->dongle_drv->reset(dev);
347 if (ret < 0) {
348 fsm->result = ret;
349 next_state = SIRDEV_STATE_ERROR;
350 break;
351 }
352 }
353 else
354 ret = 0;
355 if ((delay=ret) == 0) {
356 /* set serial port according to dongle default speed */
357 if (dev->drv->set_speed)
358 dev->drv->set_speed(dev, dev->speed);
359 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
360 next_state = SIRDEV_STATE_DONGLE_SPEED;
361 }
362 break;
363
364 case SIRDEV_STATE_DONGLE_SPEED:
365			if (dev->dongle_drv->set_speed) {
366 ret = dev->dongle_drv->set_speed(dev, fsm->param);
367 if (ret < 0) {
368 fsm->result = ret;
369 next_state = SIRDEV_STATE_ERROR;
370 break;
371 }
372 }
373 else
374 ret = 0;
375 if ((delay=ret) == 0)
376 next_state = SIRDEV_STATE_PORT_SPEED;
377 break;
378
379 case SIRDEV_STATE_PORT_SPEED:
380 /* Finally we are ready to change the serial port speed */
381 if (dev->drv->set_speed)
382 dev->drv->set_speed(dev, dev->speed);
383 dev->new_speed = 0;
384 next_state = SIRDEV_STATE_DONE;
385 break;
386
387 case SIRDEV_STATE_DONE:
388 /* Signal network layer so it can send more frames */
389 netif_wake_queue(dev->netdev);
390 next_state = SIRDEV_STATE_COMPLETE;
391 break;
392
393 default:
394 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
395 fsm->result = -EINVAL;
396 /* fall thru */
397
398 case SIRDEV_STATE_ERROR:
399 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
400
401#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
402 netif_stop_queue(dev->netdev);
403#else
404 netif_wake_queue(dev->netdev);
405#endif
406 /* fall thru */
407
408 case SIRDEV_STATE_COMPLETE:
409 /* config change finished, so we are not busy any longer */
410 sirdev_enable_rx(dev);
411 up(&fsm->sem);
412 return;
413 }
414 fsm->state = next_state;
415 } while(!delay);
416
417 irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
418}
419
420/* schedule some device configuration task for execution by kIrDAd
421 * on behalf of the above state machine.
422 * can be called from process or interrupt/tasklet context.
423 */
424
425int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
426{
427 struct sir_fsm *fsm = &dev->fsm;
428 int xmit_was_down;
429
430 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
431
432 if (down_trylock(&fsm->sem)) {
433 if (in_interrupt() || in_atomic() || irqs_disabled()) {
434 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
435 return -EWOULDBLOCK;
436 } else
437 down(&fsm->sem);
438 }
439
440 if (fsm->state == SIRDEV_STATE_DEAD) {
441 /* race with sirdev_close should never happen */
442		IRDA_ERROR("%s(), instance is stale!\n", __FUNCTION__);
443 up(&fsm->sem);
444 return -ESTALE; /* or better EPIPE? */
445 }
446
447 xmit_was_down = netif_queue_stopped(dev->netdev);
448 netif_stop_queue(dev->netdev);
449 atomic_set(&dev->enable_rx, 0);
450
451 fsm->state = initial_state;
452 fsm->param = param;
453 fsm->result = 0;
454
455 INIT_LIST_HEAD(&fsm->rq.lh_request);
456 fsm->rq.pending = 0;
457 fsm->rq.func = irda_config_fsm;
458 fsm->rq.data = dev;
459
460 if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */
461 atomic_set(&dev->enable_rx, 1);
462 if (!xmit_was_down)
463 netif_wake_queue(dev->netdev);
464 up(&fsm->sem);
465 return -EAGAIN;
466 }
467 return 0;
468}
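
/* Illustrative note: callers that need the result synchronously follow the
 * pattern of sirdev_set_dongle() - schedule the request and then block on
 * fsm->sem, which irda_config_fsm() releases on final exit:
 *
 *	err = sirdev_schedule_request(dev, SIRDEV_STATE_DONGLE_OPEN, type);
 *	if (!err) {
 *		down(&dev->fsm.sem);
 *		err = dev->fsm.result;
 *		up(&dev->fsm.sem);
 *	}
 */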
469
470int __init irda_thread_create(void)
471{
472 struct completion startup;
473 int pid;
474
475 spin_lock_init(&irda_rq_queue.lock);
476 irda_rq_queue.thread = NULL;
477 INIT_LIST_HEAD(&irda_rq_queue.request_list);
478 init_waitqueue_head(&irda_rq_queue.kick);
479 init_waitqueue_head(&irda_rq_queue.done);
480 atomic_set(&irda_rq_queue.num_pending, 0);
481
482 init_completion(&startup);
483 pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
484 if (pid <= 0)
485 return -EAGAIN;
486 else
487 wait_for_completion(&startup);
488
489 return 0;
490}
491
492void __exit irda_thread_join(void)
493{
494 if (irda_rq_queue.thread) {
495 flush_irda_queue();
496 init_completion(&irda_rq_queue.exit);
497 irda_rq_queue.thread = NULL;
498 wake_up(&irda_rq_queue.kick);
499 wait_for_completion(&irda_rq_queue.exit);
500 }
501}
502
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
new file mode 100644
index 000000000000..10125a1dba22
--- /dev/null
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -0,0 +1,2396 @@
1/*********************************************************************
2 * $Id: smsc-ircc2.c,v 1.19.2.5 2002/10/27 11:34:26 dip Exp $
3 *
4 * Description: Driver for the SMC Infrared Communications Controller
5 * Status: Experimental.
6 * Author: Daniele Peri (peri@csai.unipa.it)
7 * Created at:
8 * Modified at:
9 * Modified by:
10 *
11 * Copyright (c) 2002 Daniele Peri
12 * All Rights Reserved.
13 * Copyright (c) 2002 Jean Tourrilhes
14 *
15 *
16 * Based on smc-ircc.c:
17 *
18 * Copyright (c) 2001 Stefani Seibold
19 * Copyright (c) 1999-2001 Dag Brattli
20 * Copyright (c) 1998-1999 Thomas Davis,
21 *
22 * and irport.c:
23 *
24 * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved.
25 *
26 *
27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License as
29 * published by the Free Software Foundation; either version 2 of
30 * the License, or (at your option) any later version.
31 *
32 * This program is distributed in the hope that it will be useful,
33 * but WITHOUT ANY WARRANTY; without even the implied warranty of
34 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
35 * GNU General Public License for more details.
36 *
37 * You should have received a copy of the GNU General Public License
38 * along with this program; if not, write to the Free Software
39 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
40 * MA 02111-1307 USA
41 *
42 ********************************************************************/
43
44#include <linux/module.h>
45#include <linux/kernel.h>
46#include <linux/types.h>
47#include <linux/skbuff.h>
48#include <linux/netdevice.h>
49#include <linux/ioport.h>
50#include <linux/delay.h>
51#include <linux/slab.h>
52#include <linux/init.h>
53#include <linux/rtnetlink.h>
54#include <linux/serial_reg.h>
55#include <linux/dma-mapping.h>
56
57#include <asm/io.h>
58#include <asm/dma.h>
59#include <asm/byteorder.h>
60
61#include <linux/spinlock.h>
62#include <linux/pm.h>
63
64#include <net/irda/wrapper.h>
65#include <net/irda/irda.h>
66#include <net/irda/irda_device.h>
67
68#include "smsc-ircc2.h"
69#include "smsc-sio.h"
70
71/* Types */
72
73struct smsc_transceiver {
74 char *name;
75 void (*set_for_speed)(int fir_base, u32 speed);
76 int (*probe)(int fir_base);
77};
78typedef struct smsc_transceiver smsc_transceiver_t;
79
80#if 0
81struct smc_chip {
82 char *name;
83 u16 flags;
84 u8 devid;
85 u8 rev;
86};
87typedef struct smc_chip smc_chip_t;
88#endif
89
90struct smsc_chip {
91 char *name;
92 #if 0
93 u8 type;
94 #endif
95 u16 flags;
96 u8 devid;
97 u8 rev;
98};
99typedef struct smsc_chip smsc_chip_t;
100
101struct smsc_chip_address {
102 unsigned int cfg_base;
103 unsigned int type;
104};
105typedef struct smsc_chip_address smsc_chip_address_t;
106
107/* Private data for each instance */
108struct smsc_ircc_cb {
109 struct net_device *netdev; /* Yes! we are some kind of netdevice */
110 struct net_device_stats stats;
111	struct irlap_cb *irlap; /* The link layer we are bound to */
112
113 chipio_t io; /* IrDA controller information */
114 iobuff_t tx_buff; /* Transmit buffer */
115 iobuff_t rx_buff; /* Receive buffer */
116 dma_addr_t tx_buff_dma;
117 dma_addr_t rx_buff_dma;
118
119 struct qos_info qos; /* QoS capabilities for this device */
120
121 spinlock_t lock; /* For serializing operations */
122
123 __u32 new_speed;
124 __u32 flags; /* Interface flags */
125
126 int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
127 int tx_len; /* Number of frames in tx_buff */
128
129 int transceiver;
130 struct pm_dev *pmdev;
131};
132
133/* Constants */
134
135static const char *driver_name = "smsc-ircc2";
136#define DIM(x) (sizeof(x)/(sizeof(*(x))))
137#define SMSC_IRCC2_C_IRDA_FALLBACK_SPEED 9600
138#define SMSC_IRCC2_C_DEFAULT_TRANSCEIVER 1
139#define SMSC_IRCC2_C_NET_TIMEOUT 0
140#define SMSC_IRCC2_C_SIR_STOP 0
141
142/* Prototypes */
143
144static int smsc_ircc_open(unsigned int firbase, unsigned int sirbase, u8 dma, u8 irq);
145static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base);
146static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq);
147static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self);
148static void smsc_ircc_init_chip(struct smsc_ircc_cb *self);
149static int __exit smsc_ircc_close(struct smsc_ircc_cb *self);
150static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase);
151static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase);
152static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self);
153static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev);
154static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev);
155static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs);
156static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase);
157static void smsc_ircc_change_speed(void *priv, u32 speed);
158static void smsc_ircc_set_sir_speed(void *priv, u32 speed);
159static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
160static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev);
161static void smsc_ircc_sir_start(struct smsc_ircc_cb *self);
162#if SMSC_IRCC2_C_SIR_STOP
163static void smsc_ircc_sir_stop(struct smsc_ircc_cb *self);
164#endif
165static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self);
166static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len);
167static int smsc_ircc_net_open(struct net_device *dev);
168static int smsc_ircc_net_close(struct net_device *dev);
169static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
170#if SMSC_IRCC2_C_NET_TIMEOUT
171static void smsc_ircc_timeout(struct net_device *dev);
172#endif
173static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev);
174static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
175static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
176static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
177static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
178static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
179
180/* Probing */
181static int __init smsc_ircc_look_for_chips(void);
182static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg,const smsc_chip_t *chip,char *type);
183static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfg_base, char *type);
184static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cfg_base, char *type);
185static int __init smsc_superio_fdc(unsigned short cfg_base);
186static int __init smsc_superio_lpc(unsigned short cfg_base);
187
188/* Transceivers specific functions */
189
190static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed);
191static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base);
192static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed);
193static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base);
194static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed);
195static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base);
196
197/* Power Management */
198
199static void smsc_ircc_suspend(struct smsc_ircc_cb *self);
200static void smsc_ircc_wakeup(struct smsc_ircc_cb *self);
201static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
202
203
204/* Transceivers for SMSC-ircc */
205
206static smsc_transceiver_t smsc_transceivers[]=
207{
208 { "Toshiba Satellite 1800 (GP data pin select)", smsc_ircc_set_transceiver_toshiba_sat1800, smsc_ircc_probe_transceiver_toshiba_sat1800},
209 { "Fast pin select", smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select, smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select},
210 { "ATC IRMode", smsc_ircc_set_transceiver_smsc_ircc_atc, smsc_ircc_probe_transceiver_smsc_ircc_atc},
211 { NULL, NULL}
212};
213#define SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS (DIM(smsc_transceivers)-1)
214
215/* SMC SuperIO chipsets definitions */
216
217#define KEY55_1 0 /* SuperIO Configuration mode with Key <0x55> */
218#define KEY55_2 1 /* SuperIO Configuration mode with Key <0x55,0x55> */
219#define NoIRDA 2 /* SuperIO Chip has no IRDA Port */
222#define SERx4 8 /* SuperIO Chip supports 115.2 KBaud * 4 = 460.8 KBaud */
221#define FIR 4 /* SuperIO Chip has fast IRDA */
222#define SERx4 8 /* SuperIO Chip supports 115,2 KBaud * 4=460,8 KBaud */
223
224static smsc_chip_t __initdata fdc_chips_flat[]=
225{
226 /* Base address 0x3f0 or 0x370 */
227 { "37C44", KEY55_1|NoIRDA, 0x00, 0x00 }, /* This chip cannot be detected */
228 { "37C665GT", KEY55_2|NoIRDA, 0x65, 0x01 },
229 { "37C665GT", KEY55_2|NoIRDA, 0x66, 0x01 },
230 { "37C669", KEY55_2|SIR|SERx4, 0x03, 0x02 },
231 { "37C669", KEY55_2|SIR|SERx4, 0x04, 0x02 }, /* ID? */
232 { "37C78", KEY55_2|NoIRDA, 0x78, 0x00 },
233 { "37N769", KEY55_1|FIR|SERx4, 0x28, 0x00 },
234 { "37N869", KEY55_1|FIR|SERx4, 0x29, 0x00 },
235 { NULL }
236};
237
238static smsc_chip_t __initdata fdc_chips_paged[]=
239{
240 /* Base address 0x3f0 or 0x370 */
241 { "37B72X", KEY55_1|SIR|SERx4, 0x4c, 0x00 },
242 { "37B77X", KEY55_1|SIR|SERx4, 0x43, 0x00 },
243 { "37B78X", KEY55_1|SIR|SERx4, 0x44, 0x00 },
244 { "37B80X", KEY55_1|SIR|SERx4, 0x42, 0x00 },
245 { "37C67X", KEY55_1|FIR|SERx4, 0x40, 0x00 },
246 { "37C93X", KEY55_2|SIR|SERx4, 0x02, 0x01 },
247 { "37C93XAPM", KEY55_1|SIR|SERx4, 0x30, 0x01 },
248 { "37C93XFR", KEY55_2|FIR|SERx4, 0x03, 0x01 },
249 { "37M707", KEY55_1|SIR|SERx4, 0x42, 0x00 },
250 { "37M81X", KEY55_1|SIR|SERx4, 0x4d, 0x00 },
251 { "37N958FR", KEY55_1|FIR|SERx4, 0x09, 0x04 },
252 { "37N971", KEY55_1|FIR|SERx4, 0x0a, 0x00 },
253 { "37N972", KEY55_1|FIR|SERx4, 0x0b, 0x00 },
254 { NULL }
255};
256
257static smsc_chip_t __initdata lpc_chips_flat[]=
258{
259 /* Base address 0x2E or 0x4E */
260 { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 },
261 { "47N267", KEY55_1|FIR|SERx4, 0x5e, 0x00 },
262 { NULL }
263};
264
265static smsc_chip_t __initdata lpc_chips_paged[]=
266{
267 /* Base address 0x2E or 0x4E */
268 { "47B27X", KEY55_1|SIR|SERx4, 0x51, 0x00 },
269 { "47B37X", KEY55_1|SIR|SERx4, 0x52, 0x00 },
270 { "47M10X", KEY55_1|SIR|SERx4, 0x59, 0x00 },
271 { "47M120", KEY55_1|NoIRDA|SERx4, 0x5c, 0x00 },
272 { "47M13X", KEY55_1|SIR|SERx4, 0x59, 0x00 },
273 { "47M14X", KEY55_1|SIR|SERx4, 0x5f, 0x00 },
274 { "47N252", KEY55_1|FIR|SERx4, 0x0e, 0x00 },
275 { "47S42X", KEY55_1|SIR|SERx4, 0x57, 0x00 },
276 { NULL }
277};
278
279#define SMSCSIO_TYPE_FDC 1
280#define SMSCSIO_TYPE_LPC 2
281#define SMSCSIO_TYPE_FLAT 4
282#define SMSCSIO_TYPE_PAGED 8
283
284static smsc_chip_address_t __initdata possible_addresses[]=
285{
286 {0x3f0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
287 {0x370, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
288 {0xe0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
289 {0x2e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
290 {0x4e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED},
291 {0,0}
292};
293
294/* Globals */
295
296static struct smsc_ircc_cb *dev_self[] = { NULL, NULL};
297
298static int ircc_irq=255;
299static int ircc_dma=255;
300static int ircc_fir=0;
301static int ircc_sir=0;
302static int ircc_cfg=0;
303static int ircc_transceiver=0;
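/* Option defaults: 255 for irq/dma means "use the values the chip itself
 * reports" (see smsc_ircc_setup_io()), while 0 for the fir/sir/cfg base
 * addresses and for the transceiver selection means "probe/autodetect".
 */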
304
305static unsigned short dev_count=0;
306
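/* Select one of the chip's banked register sets by writing the bank number
 * into the low bits of IRCC_MASTER while preserving the upper control bits;
 * subsequent register accesses at iobase then address that bank.
 */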
307static inline void register_bank(int iobase, int bank)
308{
309 outb(((inb(iobase+IRCC_MASTER) & 0xf0) | (bank & 0x07)),
310 iobase+IRCC_MASTER);
311}
312
313
314/*******************************************************************************
315 *
316 *
317 * SMSC-ircc stuff
318 *
319 *
320 *******************************************************************************/
321
322/*
323 * Function smsc_ircc_init ()
324 *
325 * Initialize chip. Just try to find out how many chips we are dealing with
326 * and where they are
327 */
328static int __init smsc_ircc_init(void)
329{
330 int ret=-ENODEV;
331
332 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
333
334 dev_count=0;
335
336 if ((ircc_fir>0)&&(ircc_sir>0)) {
337 IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir);
338 IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir);
339
340 if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq) == 0)
341 return 0;
342
343 return -ENODEV;
344 }
345
346 /* try user provided configuration register base address */
347 if (ircc_cfg>0) {
348 IRDA_MESSAGE(" Overriding configuration address 0x%04x\n",
349 ircc_cfg);
350 if (!smsc_superio_fdc(ircc_cfg))
351 ret = 0;
352 if (!smsc_superio_lpc(ircc_cfg))
353 ret = 0;
354 }
355
356 if(smsc_ircc_look_for_chips()>0) ret = 0;
357
358 return ret;
359}
360
361/*
362 * Function smsc_ircc_open (firbase, sirbase, dma, irq)
363 *
364 * Try to open driver instance
365 *
366 */
367static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
368{
369 struct smsc_ircc_cb *self;
370 struct net_device *dev;
371 int err;
372
373 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
374
375 err = smsc_ircc_present(fir_base, sir_base);
376 if(err)
377 goto err_out;
378
379 err = -ENOMEM;
380	if (dev_count >= DIM(dev_self)) {
381 IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__);
382 goto err_out1;
383 }
384
385 /*
386 * Allocate new instance of the driver
387 */
388 dev = alloc_irdadev(sizeof(struct smsc_ircc_cb));
389 if (!dev) {
390 IRDA_WARNING("%s() can't allocate net device\n", __FUNCTION__);
391 goto err_out1;
392 }
393
394 SET_MODULE_OWNER(dev);
395
396 dev->hard_start_xmit = smsc_ircc_hard_xmit_sir;
397#if SMSC_IRCC2_C_NET_TIMEOUT
398 dev->tx_timeout = smsc_ircc_timeout;
399 dev->watchdog_timeo = HZ*2; /* Allow enough time for speed change */
400#endif
401 dev->open = smsc_ircc_net_open;
402 dev->stop = smsc_ircc_net_close;
403 dev->do_ioctl = smsc_ircc_net_ioctl;
404 dev->get_stats = smsc_ircc_net_get_stats;
405
406 self = dev->priv;
407 self->netdev = dev;
408
409 /* Make ifconfig display some details */
410 dev->base_addr = self->io.fir_base = fir_base;
411 dev->irq = self->io.irq = irq;
412
413 /* Need to store self somewhere */
414 dev_self[dev_count++] = self;
415 spin_lock_init(&self->lock);
416
417 self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE;
418 self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
419
420 self->rx_buff.head =
421 dma_alloc_coherent(NULL, self->rx_buff.truesize,
422 &self->rx_buff_dma, GFP_KERNEL);
423 if (self->rx_buff.head == NULL) {
424 IRDA_ERROR("%s, Can't allocate memory for receive buffer!\n",
425 driver_name);
426 goto err_out2;
427 }
428
429 self->tx_buff.head =
430 dma_alloc_coherent(NULL, self->tx_buff.truesize,
431 &self->tx_buff_dma, GFP_KERNEL);
432 if (self->tx_buff.head == NULL) {
433 IRDA_ERROR("%s, Can't allocate memory for transmit buffer!\n",
434 driver_name);
435 goto err_out3;
436 }
437
438 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
439 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
440
441 self->rx_buff.in_frame = FALSE;
442 self->rx_buff.state = OUTSIDE_FRAME;
443 self->tx_buff.data = self->tx_buff.head;
444 self->rx_buff.data = self->rx_buff.head;
445
446 smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq);
447
448 smsc_ircc_setup_qos(self);
449
450 smsc_ircc_init_chip(self);
451
452 if(ircc_transceiver > 0 &&
453 ircc_transceiver < SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS)
454 self->transceiver = ircc_transceiver;
455 else
456 smsc_ircc_probe_transceiver(self);
457
458 err = register_netdev(self->netdev);
459 if(err) {
460 IRDA_ERROR("%s, Network device registration failed!\n",
461 driver_name);
462 goto err_out4;
463 }
464
465 self->pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, smsc_ircc_pmproc);
466 if (self->pmdev)
467 self->pmdev->data = self;
468
469 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
470
471 return 0;
472 err_out4:
473 dma_free_coherent(NULL, self->tx_buff.truesize,
474 self->tx_buff.head, self->tx_buff_dma);
475 err_out3:
476 dma_free_coherent(NULL, self->rx_buff.truesize,
477 self->rx_buff.head, self->rx_buff_dma);
478 err_out2:
479 free_netdev(self->netdev);
480 dev_self[--dev_count] = NULL;
481 err_out1:
482 release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
483 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
484 err_out:
485 return err;
486}
487
488/*
489 * Function smsc_ircc_present(fir_base, sir_base)
490 *
491 * Check the smsc-ircc chip presence
492 *
493 */
494static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
495{
496 unsigned char low, high, chip, config, dma, irq, version;
497
498 if (!request_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT,
499 driver_name)) {
500 IRDA_WARNING("%s: can't get fir_base of 0x%03x\n",
501 __FUNCTION__, fir_base);
502 goto out1;
503 }
504
505 if (!request_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT,
506 driver_name)) {
507 IRDA_WARNING("%s: can't get sir_base of 0x%03x\n",
508 __FUNCTION__, sir_base);
509 goto out2;
510 }
511
512 register_bank(fir_base, 3);
513
514 high = inb(fir_base+IRCC_ID_HIGH);
515 low = inb(fir_base+IRCC_ID_LOW);
516 chip = inb(fir_base+IRCC_CHIP_ID);
517 version = inb(fir_base+IRCC_VERSION);
518 config = inb(fir_base+IRCC_INTERFACE);
519 dma = config & IRCC_INTERFACE_DMA_MASK;
520 irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
521
522 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
523 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
524 __FUNCTION__, fir_base);
525 goto out3;
526 }
527 IRDA_MESSAGE("SMsC IrDA Controller found\n IrCC version %d.%d, "
528 "firport 0x%03x, sirport 0x%03x dma=%d, irq=%d\n",
529 chip & 0x0f, version, fir_base, sir_base, dma, irq);
530
531 return 0;
532 out3:
533 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
534 out2:
535 release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
536 out1:
537 return -ENODEV;
538}
539
540/*
541 * Function smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq)
542 *
543 * Setup I/O
544 *
545 */
546static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
547 unsigned int fir_base, unsigned int sir_base,
548 u8 dma, u8 irq)
549{
550 unsigned char config, chip_dma, chip_irq;
551
552 register_bank(fir_base, 3);
553 config = inb(fir_base+IRCC_INTERFACE);
554 chip_dma = config & IRCC_INTERFACE_DMA_MASK;
555 chip_irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
556
557 self->io.fir_base = fir_base;
558 self->io.sir_base = sir_base;
559 self->io.fir_ext = SMSC_IRCC2_FIR_CHIP_IO_EXTENT;
560 self->io.sir_ext = SMSC_IRCC2_SIR_CHIP_IO_EXTENT;
561 self->io.fifo_size = SMSC_IRCC2_FIFO_SIZE;
562 self->io.speed = SMSC_IRCC2_C_IRDA_FALLBACK_SPEED;
563
564 if (irq < 255) {
565 if (irq != chip_irq)
566 IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n",
567 driver_name, chip_irq, irq);
568 self->io.irq = irq;
569 }
570 else
571 self->io.irq = chip_irq;
572
573 if (dma < 255) {
574 if (dma != chip_dma)
575 IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n",
576 driver_name, chip_dma, dma);
577 self->io.dma = dma;
578 }
579 else
580 self->io.dma = chip_dma;
581
582}
583
584/*
585 * Function smsc_ircc_setup_qos(self)
586 *
587 * Setup qos
588 *
589 */
590static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
591{
592 /* Initialize QoS for this device */
593 irda_init_max_qos_capabilies(&self->qos);
594
595 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
596 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
597
598 self->qos.min_turn_time.bits = SMSC_IRCC2_MIN_TURN_TIME;
599 self->qos.window_size.bits = SMSC_IRCC2_WINDOW_SIZE;
600 irda_qos_bits_to_value(&self->qos);
601}
602
603/*
604 * Function smsc_ircc_init_chip(self)
605 *
606 * Init chip
607 *
608 */
609static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
610{
611 int iobase, ir_mode, ctrl, fast;
612
613 IRDA_ASSERT( self != NULL, return; );
614 iobase = self->io.fir_base;
615
616 ir_mode = IRCC_CFGA_IRDA_SIR_A;
617 ctrl = 0;
618 fast = 0;
619
620 register_bank(iobase, 0);
621 outb(IRCC_MASTER_RESET, iobase+IRCC_MASTER);
622 outb(0x00, iobase+IRCC_MASTER);
623
624 register_bank(iobase, 1);
625 outb(((inb(iobase+IRCC_SCE_CFGA) & 0x87) | ir_mode),
626 iobase+IRCC_SCE_CFGA);
627
628#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
629 outb(((inb(iobase+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
630 iobase+IRCC_SCE_CFGB);
631#else
632 outb(((inb(iobase+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
633 iobase+IRCC_SCE_CFGB);
634#endif
635 (void) inb(iobase+IRCC_FIFO_THRESHOLD);
636 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase+IRCC_FIFO_THRESHOLD);
637
638 register_bank(iobase, 4);
639 outb((inb(iobase+IRCC_CONTROL) & 0x30) | ctrl, iobase+IRCC_CONTROL);
640
641 register_bank(iobase, 0);
642 outb(fast, iobase+IRCC_LCR_A);
643
644 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
645
646 /* Power on device */
647 outb(0x00, iobase+IRCC_MASTER);
648}
649
650/*
651 * Function smsc_ircc_net_ioctl (dev, rq, cmd)
652 *
653 * Process IOCTL commands for this device
654 *
655 */
656static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
657{
658 struct if_irda_req *irq = (struct if_irda_req *) rq;
659 struct smsc_ircc_cb *self;
660 unsigned long flags;
661 int ret = 0;
662
663 IRDA_ASSERT(dev != NULL, return -1;);
664
665 self = dev->priv;
666
667 IRDA_ASSERT(self != NULL, return -1;);
668
669 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
670
671 switch (cmd) {
672 case SIOCSBANDWIDTH: /* Set bandwidth */
673 if (!capable(CAP_NET_ADMIN))
674 ret = -EPERM;
675 else {
676 /* Make sure we are the only one touching
677 * self->io.speed and the hardware - Jean II */
678 spin_lock_irqsave(&self->lock, flags);
679 smsc_ircc_change_speed(self, irq->ifr_baudrate);
680 spin_unlock_irqrestore(&self->lock, flags);
681 }
682 break;
683 case SIOCSMEDIABUSY: /* Set media busy */
684 if (!capable(CAP_NET_ADMIN)) {
685 ret = -EPERM;
686 break;
687 }
688
689 irda_device_set_media_busy(self->netdev, TRUE);
690 break;
691 case SIOCGRECEIVING: /* Check if we are receiving right now */
692 irq->ifr_receiving = smsc_ircc_is_receiving(self);
693 break;
694 #if 0
695 case SIOCSDTRRTS:
696 if (!capable(CAP_NET_ADMIN)) {
697 ret = -EPERM;
698 break;
699 }
700 smsc_ircc_sir_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
701 break;
702 #endif
703 default:
704 ret = -EOPNOTSUPP;
705 }
706
707 return ret;
708}
709
710static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
711{
712 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) dev->priv;
713
714 return &self->stats;
715}
716
717#if SMSC_IRCC2_C_NET_TIMEOUT
718/*
719 * Function smsc_ircc_timeout (struct net_device *dev)
720 *
721 * The networking timeout management.
722 *
723 */
724
725static void smsc_ircc_timeout(struct net_device *dev)
726{
727 struct smsc_ircc_cb *self;
728 unsigned long flags;
729
730 self = (struct smsc_ircc_cb *) dev->priv;
731
732 IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n",
733 dev->name, self->io.speed);
734 spin_lock_irqsave(&self->lock, flags);
735 smsc_ircc_sir_start(self);
736 smsc_ircc_change_speed(self, self->io.speed);
737 dev->trans_start = jiffies;
738 netif_wake_queue(dev);
739 spin_unlock_irqrestore(&self->lock, flags);
740}
741#endif
742
743/*
744 * Function smsc_ircc_hard_xmit_sir (struct sk_buff *skb, struct net_device *dev)
745 *
746 *    Transmits the current frame until the FIFO is full, then
747 * waits until the next transmit interrupt, and continues until the
748 * frame is transmitted.
749 */
750int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
751{
752 struct smsc_ircc_cb *self;
753 unsigned long flags;
754 int iobase;
755 s32 speed;
756
757 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
758
759 IRDA_ASSERT(dev != NULL, return 0;);
760
761 self = (struct smsc_ircc_cb *) dev->priv;
762 IRDA_ASSERT(self != NULL, return 0;);
763
764 iobase = self->io.sir_base;
765
766 netif_stop_queue(dev);
767
768 /* Make sure test of self->io.speed & speed change are atomic */
769 spin_lock_irqsave(&self->lock, flags);
770
771 /* Check if we need to change the speed */
772 speed = irda_get_next_speed(skb);
773 if ((speed != self->io.speed) && (speed != -1)) {
774 /* Check for empty frame */
775 if (!skb->len) {
776 /*
777 * We send frames one by one in SIR mode (no
778 * pipelining), so at this point, if we were sending
779 * a previous frame, we just received the interrupt
780 * telling us it is finished (UART_IIR_THRI).
781 * Therefore, waiting for the transmitter to really
782 * finish draining the fifo won't take too long.
783 * And the interrupt handler is not expected to run.
784 * - Jean II */
785 smsc_ircc_sir_wait_hw_transmitter_finish(self);
786 smsc_ircc_change_speed(self, speed);
787 spin_unlock_irqrestore(&self->lock, flags);
788 dev_kfree_skb(skb);
789 return 0;
790 } else {
791 self->new_speed = speed;
792 }
793 }
794
795 /* Init tx buffer */
796 self->tx_buff.data = self->tx_buff.head;
797
798 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
799 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
800 self->tx_buff.truesize);
801
802 self->stats.tx_bytes += self->tx_buff.len;
803
804 /* Turn on transmit finished interrupt. Will fire immediately! */
805 outb(UART_IER_THRI, iobase+UART_IER);
806
807 spin_unlock_irqrestore(&self->lock, flags);
808
809 dev_kfree_skb(skb);
810
811 return 0;
812}
813
814/*
815 * Function smsc_ircc_set_fir_speed (self, baud)
816 *
817 * Change the speed of the device
818 *
819 */
820static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
821{
822 int fir_base, ir_mode, ctrl, fast;
823
824 IRDA_ASSERT(self != NULL, return;);
825 fir_base = self->io.fir_base;
826
827 self->io.speed = speed;
828
829 switch(speed) {
830 default:
831 case 576000:
832 ir_mode = IRCC_CFGA_IRDA_HDLC;
833 ctrl = IRCC_CRC;
834 fast = 0;
835 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__);
836 break;
837 case 1152000:
838 ir_mode = IRCC_CFGA_IRDA_HDLC;
839 ctrl = IRCC_1152 | IRCC_CRC;
840 fast = IRCC_LCR_A_FAST | IRCC_LCR_A_GP_DATA;
841 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n",
842 __FUNCTION__);
843 break;
844 case 4000000:
845 ir_mode = IRCC_CFGA_IRDA_4PPM;
846 ctrl = IRCC_CRC;
847 fast = IRCC_LCR_A_FAST;
848 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n",
849 __FUNCTION__);
850 break;
851 }
852 #if 0
853	Now in transceiver!
854 /* This causes an interrupt */
855 register_bank(fir_base, 0);
856 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast, fir_base+IRCC_LCR_A);
857 #endif
858
859 register_bank(fir_base, 1);
860 outb(((inb(fir_base+IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | ir_mode), fir_base+IRCC_SCE_CFGA);
861
862 register_bank(fir_base, 4);
863 outb((inb(fir_base+IRCC_CONTROL) & 0x30) | ctrl, fir_base+IRCC_CONTROL);
864}
865
866/*
867 * Function smsc_ircc_fir_start(self)
868 *
869 *    Start FIR operation
870 *
871 */
872static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
873{
874 struct net_device *dev;
875 int fir_base;
876
877 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
878
879 IRDA_ASSERT(self != NULL, return;);
880 dev = self->netdev;
881 IRDA_ASSERT(dev != NULL, return;);
882
883 fir_base = self->io.fir_base;
884
885 /* Reset everything */
886
887 /* Install FIR transmit handler */
888 dev->hard_start_xmit = smsc_ircc_hard_xmit_fir;
889
890 /* Clear FIFO */
891 outb(inb(fir_base+IRCC_LCR_A)|IRCC_LCR_A_FIFO_RESET, fir_base+IRCC_LCR_A);
892
893 /* Enable interrupt */
894 /*outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base+IRCC_IER);*/
895
896 register_bank(fir_base, 1);
897
898 /* Select the TX/RX interface */
899#ifdef SMSC_669 /* Uses pin 88/89 for Rx/Tx */
900 outb(((inb(fir_base+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
901 fir_base+IRCC_SCE_CFGB);
902#else
903 outb(((inb(fir_base+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
904 fir_base+IRCC_SCE_CFGB);
905#endif
906 (void) inb(fir_base+IRCC_FIFO_THRESHOLD);
907
908 /* Enable SCE interrupts */
909 outb(0, fir_base+IRCC_MASTER);
910 register_bank(fir_base, 0);
911 outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base+IRCC_IER);
912 outb(IRCC_MASTER_INT_EN, fir_base+IRCC_MASTER);
913}
914
915/*
916 * Function smsc_ircc_fir_stop(self)
917 *
918 *    Stop FIR operation
919 *
920 */
921static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
922{
923 int fir_base;
924
925 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
926
927 IRDA_ASSERT(self != NULL, return;);
928
929 fir_base = self->io.fir_base;
930 register_bank(fir_base, 0);
931 /*outb(IRCC_MASTER_RESET, fir_base+IRCC_MASTER);*/
932 outb(inb(fir_base+IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base+IRCC_LCR_B);
933}
934
935
936/*
937 * Function smsc_ircc_change_speed(self, baud)
938 *
939 * Change the speed of the device
940 *
941 * This function *must* be called with spinlock held, because it may
942 * be called from the irq handler. - Jean II
943 */
944static void smsc_ircc_change_speed(void *priv, u32 speed)
945{
946 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) priv;
947 struct net_device *dev;
948 int iobase;
949 int last_speed_was_sir;
950
951 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed);
952
953 IRDA_ASSERT(self != NULL, return;);
954 dev = self->netdev;
955 iobase = self->io.fir_base;
956
957 last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED;
958
959 #if 0
960 /* Temp Hack */
961 speed= 1152000;
962 self->io.speed = speed;
963 last_speed_was_sir = 0;
964 smsc_ircc_fir_start(self);
965 #endif
966
967 if(self->io.speed == 0)
968 smsc_ircc_sir_start(self);
969
970 #if 0
971 if(!last_speed_was_sir) speed = self->io.speed;
972 #endif
973
974 if(self->io.speed != speed) smsc_ircc_set_transceiver_for_speed(self, speed);
975
976 self->io.speed = speed;
977
978 if(speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
979 if(!last_speed_was_sir) {
980 smsc_ircc_fir_stop(self);
981 smsc_ircc_sir_start(self);
982 }
983 smsc_ircc_set_sir_speed(self, speed);
984 }
985 else {
986 if(last_speed_was_sir) {
987 #if SMSC_IRCC2_C_SIR_STOP
988 smsc_ircc_sir_stop(self);
989 #endif
990 smsc_ircc_fir_start(self);
991 }
992 smsc_ircc_set_fir_speed(self, speed);
993
994 #if 0
995 self->tx_buff.len = 10;
996 self->tx_buff.data = self->tx_buff.head;
997
998 smsc_ircc_dma_xmit(self, iobase, 4000);
999 #endif
1000 /* Be ready for incoming frames */
1001 smsc_ircc_dma_receive(self, iobase);
1002 }
1003
1004 netif_wake_queue(dev);
1005}
1006
1007/*
1008 * Function smsc_ircc_set_sir_speed (self, speed)
1009 *
1010 * Set speed of IrDA port to specified baudrate
1011 *
1012 */
1013void smsc_ircc_set_sir_speed(void *priv, __u32 speed)
1014{
1015 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) priv;
1016 int iobase;
1017 int fcr; /* FIFO control reg */
1018 int lcr; /* Line control reg */
1019 int divisor;
1020
1021 IRDA_DEBUG(0, "%s(), Setting speed to: %d\n", __FUNCTION__, speed);
1022
1023 IRDA_ASSERT(self != NULL, return;);
1024 iobase = self->io.sir_base;
1025
1026 /* Update accounting for new speed */
1027 self->io.speed = speed;
1028
1029 /* Turn off interrupts */
1030 outb(0, iobase+UART_IER);
1031
1032 divisor = SMSC_IRCC2_MAX_SIR_SPEED/speed;
1033
1034 fcr = UART_FCR_ENABLE_FIFO;
1035
1036 /*
1037	 * Use trigger level 1 to avoid a 3 ms timeout delay at 9600 bps, and
1038	 * almost 1.7 ms at 19200 bps. At speeds above that we can just forget
1039 * about this timeout since it will always be fast enough.
1040 */
1041 if (self->io.speed < 38400)
1042 fcr |= UART_FCR_TRIGGER_1;
1043 else
1044 fcr |= UART_FCR_TRIGGER_14;
1045
1046 /* IrDA ports use 8N1 */
1047 lcr = UART_LCR_WLEN8;
1048
1049 outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */
1050 outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */
1051 outb(divisor >> 8, iobase+UART_DLM);
1052 outb(lcr, iobase+UART_LCR); /* Set 8N1 */
1053 outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
1054
1055	/* Turn on interrupts */
1056 outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, iobase+UART_IER);
1057
1058 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed);
1059}
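The SIR block is programmed like a 16550 UART: the divisor latch (DLL/DLM) holds SMSC_IRCC2_MAX_SIR_SPEED / speed. A small stand-alone sketch of that arithmetic, outside the driver; the loop and table are mine, for illustration only:

	/* Illustrative only, not part of the driver: SIR divisor values. */
	#include <stdio.h>

	#define SMSC_IRCC2_MAX_SIR_SPEED 115200

	int main(void)
	{
		unsigned int speeds[] = { 9600, 19200, 38400, 57600, 115200 };
		unsigned int i;

		for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++) {
			unsigned int divisor = SMSC_IRCC2_MAX_SIR_SPEED / speeds[i];

			/* divisor is split into DLL (low byte) and DLM (high byte) */
			printf("%6u bps -> divisor %2u (DLL=0x%02x, DLM=0x%02x)\n",
			       speeds[i], divisor, divisor & 0xff, divisor >> 8);
		}
		return 0;	/* 9600 -> 12, 19200 -> 6, ..., 115200 -> 1 */
	}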
1060
1061
1062/*
1063 * Function smsc_ircc_hard_xmit_fir (skb, dev)
1064 *
1065 * Transmit the frame!
1066 *
1067 */
1068static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1069{
1070 struct smsc_ircc_cb *self;
1071 unsigned long flags;
1072 s32 speed;
1073 int iobase;
1074 int mtt;
1075
1076 IRDA_ASSERT(dev != NULL, return 0;);
1077 self = (struct smsc_ircc_cb *) dev->priv;
1078 IRDA_ASSERT(self != NULL, return 0;);
1079
1080 iobase = self->io.fir_base;
1081
1082 netif_stop_queue(dev);
1083
1084 /* Make sure test of self->io.speed & speed change are atomic */
1085 spin_lock_irqsave(&self->lock, flags);
1086
1087 /* Check if we need to change the speed after this frame */
1088 speed = irda_get_next_speed(skb);
1089 if ((speed != self->io.speed) && (speed != -1)) {
1090 /* Check for empty frame */
1091 if (!skb->len) {
1092 /* Note : you should make sure that speed changes
1093 * are not going to corrupt any outgoing frame.
1094 * Look at nsc-ircc for the gory details - Jean II */
1095 smsc_ircc_change_speed(self, speed);
1096 spin_unlock_irqrestore(&self->lock, flags);
1097 dev_kfree_skb(skb);
1098 return 0;
1099 } else
1100 self->new_speed = speed;
1101 }
1102
1103 memcpy(self->tx_buff.head, skb->data, skb->len);
1104
1105 self->tx_buff.len = skb->len;
1106 self->tx_buff.data = self->tx_buff.head;
1107
1108 mtt = irda_get_mtt(skb);
1109 if (mtt) {
1110 int bofs;
1111
1112 /*
1113 * Compute how many BOFs (STA or PA's) we need to waste the
1114 * min turn time given the speed of the link.
1115 */
1116 bofs = mtt * (self->io.speed / 1000) / 8000;
1117 if (bofs > 4095)
1118 bofs = 4095;
1119
1120 smsc_ircc_dma_xmit(self, iobase, bofs);
1121 } else {
1122 /* Transmit frame */
1123 smsc_ircc_dma_xmit(self, iobase, 0);
1124 }
1125 spin_unlock_irqrestore(&self->lock, flags);
1126 dev_kfree_skb(skb);
1127
1128 return 0;
1129}
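The BOF padding above is plain arithmetic: the number of bytes sent during mtt microseconds at speed bits per second is mtt * (speed / 1000) / 8000, clamped to the 12-bit hardware counter. A stand-alone sketch reproducing it; the helper name and sample values are mine, for illustration only:

	/* Illustrative only, not part of the driver: BOF padding arithmetic
	 * as used in smsc_ircc_hard_xmit_fir(). */
	#include <stdio.h>

	static int bofs_for_mtt(int mtt_us, int speed_bps)
	{
		/* bytes sent during mtt_us at speed_bps, 8 bits per BOF byte */
		int bofs = mtt_us * (speed_bps / 1000) / 8000;

		return bofs > 4095 ? 4095 : bofs;	/* 12-bit counter limit */
	}

	int main(void)
	{
		printf("%d\n", bofs_for_mtt(10000, 4000000));	/* 5000 -> clamped to 4095 */
		printf("%d\n", bofs_for_mtt(10000, 1152000));	/* 1440 */
		printf("%d\n", bofs_for_mtt(1000, 4000000));	/* 500 */
		return 0;
	}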
1130
1131/*
1132 * Function smsc_ircc_dma_xmit (self, iobase)
1133 *
1134 * Transmit data using DMA
1135 *
1136 */
1137static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs)
1138{
1139 u8 ctrl;
1140
1141 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1142#if 1
1143 /* Disable Rx */
1144 register_bank(iobase, 0);
1145 outb(0x00, iobase+IRCC_LCR_B);
1146#endif
1147 register_bank(iobase, 1);
1148 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1149 iobase+IRCC_SCE_CFGB);
1150
1151 self->io.direction = IO_XMIT;
1152
1153 /* Set BOF additional count for generating the min turn time */
1154 register_bank(iobase, 4);
1155 outb(bofs & 0xff, iobase+IRCC_BOF_COUNT_LO);
1156 ctrl = inb(iobase+IRCC_CONTROL) & 0xf0;
1157 outb(ctrl | ((bofs >> 8) & 0x0f), iobase+IRCC_BOF_COUNT_HI);
1158
1159 /* Set max Tx frame size */
1160 outb(self->tx_buff.len >> 8, iobase+IRCC_TX_SIZE_HI);
1161 outb(self->tx_buff.len & 0xff, iobase+IRCC_TX_SIZE_LO);
1162
1163 /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/
1164
1165 /* Enable burst mode chip Tx DMA */
1166 register_bank(iobase, 1);
1167 outb(inb(iobase+IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
1168 IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB);
1169
1170 /* Setup DMA controller (must be done after enabling chip DMA) */
1171 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
1172 DMA_TX_MODE);
1173
1174 /* Enable interrupt */
1175
1176 register_bank(iobase, 0);
1177 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase+IRCC_IER);
1178 outb(IRCC_MASTER_INT_EN, iobase+IRCC_MASTER);
1179
1180 /* Enable transmit */
1181 outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase+IRCC_LCR_B);
1182}
1183
1184/*
1185 * Function smsc_ircc_dma_xmit_complete (self)
1186 *
1187 *    The transfer of a frame is finished. This function will only be called
1188 * by the interrupt handler
1189 *
1190 */
1191static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase)
1192{
1193 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1194#if 0
1195 /* Disable Tx */
1196 register_bank(iobase, 0);
1197 outb(0x00, iobase+IRCC_LCR_B);
1198#endif
1199 register_bank(self->io.fir_base, 1);
1200 outb(inb(self->io.fir_base+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1201 self->io.fir_base+IRCC_SCE_CFGB);
1202
1203 /* Check for underrun! */
1204 register_bank(iobase, 0);
1205 if (inb(iobase+IRCC_LSR) & IRCC_LSR_UNDERRUN) {
1206 self->stats.tx_errors++;
1207 self->stats.tx_fifo_errors++;
1208
1209 /* Reset error condition */
1210 register_bank(iobase, 0);
1211 outb(IRCC_MASTER_ERROR_RESET, iobase+IRCC_MASTER);
1212 outb(0x00, iobase+IRCC_MASTER);
1213 } else {
1214 self->stats.tx_packets++;
1215 self->stats.tx_bytes += self->tx_buff.len;
1216 }
1217
1218 /* Check if it's time to change the speed */
1219 if (self->new_speed) {
1220 smsc_ircc_change_speed(self, self->new_speed);
1221 self->new_speed = 0;
1222 }
1223
1224 netif_wake_queue(self->netdev);
1225}
1226
1227/*
1228 * Function smsc_ircc_dma_receive(self)
1229 *
1230 *    Get ready to receive a frame. The device will initiate a DMA
1231 *    transfer when it starts to receive a frame.
1232 *
1233 */
1234static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase)
1235{
1236#if 0
1237 /* Turn off chip DMA */
1238 register_bank(iobase, 1);
1239 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1240 iobase+IRCC_SCE_CFGB);
1241#endif
1242
1243 /* Disable Tx */
1244 register_bank(iobase, 0);
1245 outb(0x00, iobase+IRCC_LCR_B);
1246
1247 /* Turn off chip DMA */
1248 register_bank(iobase, 1);
1249 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1250 iobase+IRCC_SCE_CFGB);
1251
1252 self->io.direction = IO_RECV;
1253 self->rx_buff.data = self->rx_buff.head;
1254
1255 /* Set max Rx frame size */
1256 register_bank(iobase, 4);
1257 outb((2050 >> 8) & 0x0f, iobase+IRCC_RX_SIZE_HI);
1258 outb(2050 & 0xff, iobase+IRCC_RX_SIZE_LO);
1259
1260 /* Setup DMA controller */
1261 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
1262 DMA_RX_MODE);
1263
1264 /* Enable burst mode chip Rx DMA */
1265 register_bank(iobase, 1);
1266 outb(inb(iobase+IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
1267 IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB);
1268
1269 /* Enable interrupt */
1270 register_bank(iobase, 0);
1271 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase+IRCC_IER);
1272 outb(IRCC_MASTER_INT_EN, iobase+IRCC_MASTER);
1273
1274
1275 /* Enable receiver */
1276 register_bank(iobase, 0);
1277 outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE,
1278 iobase+IRCC_LCR_B);
1279
1280 return 0;
1281}
1282
1283/*
1284 * Function smsc_ircc_dma_receive_complete(self, iobase)
1285 *
1286 *    Finished receiving a frame; called from the interrupt handler
1287 *
1288 */
1289static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase)
1290{
1291 struct sk_buff *skb;
1292 int len, msgcnt, lsr;
1293
1294 register_bank(iobase, 0);
1295
1296 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1297#if 0
1298 /* Disable Rx */
1299 register_bank(iobase, 0);
1300 outb(0x00, iobase+IRCC_LCR_B);
1301#endif
1302 register_bank(iobase, 0);
1303 outb(inb(iobase+IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase+IRCC_LSAR);
1304 lsr= inb(iobase+IRCC_LSR);
1305 msgcnt = inb(iobase+IRCC_LCR_B) & 0x08;
1306
1307 IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__,
1308 get_dma_residue(self->io.dma));
1309
1310 len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
1311
1312 /* Look for errors
1313 */
1314
1315 if(lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
1316 self->stats.rx_errors++;
1317 if(lsr & IRCC_LSR_FRAME_ERROR) self->stats.rx_frame_errors++;
1318 if(lsr & IRCC_LSR_CRC_ERROR) self->stats.rx_crc_errors++;
1319 if(lsr & IRCC_LSR_SIZE_ERROR) self->stats.rx_length_errors++;
1320 if(lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN)) self->stats.rx_length_errors++;
1321 return;
1322 }
1323 /* Remove CRC */
1324 if (self->io.speed < 4000000)
1325 len -= 2;
1326 else
1327 len -= 4;
1328
1329 if ((len < 2) || (len > 2050)) {
1330 IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len);
1331 return;
1332 }
1333 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len);
1334
1335 skb = dev_alloc_skb(len+1);
1336 if (!skb) {
1337 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
1338 __FUNCTION__);
1339 return;
1340 }
1341 /* Make sure IP header gets aligned */
1342 skb_reserve(skb, 1);
1343
1344 memcpy(skb_put(skb, len), self->rx_buff.data, len);
1345 self->stats.rx_packets++;
1346 self->stats.rx_bytes += len;
1347
1348 skb->dev = self->netdev;
1349 skb->mac.raw = skb->data;
1350 skb->protocol = htons(ETH_P_IRDA);
1351 netif_rx(skb);
1352}
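The length recovered above is the number of bytes the DMA engine actually wrote (buffer size minus the DMA residue) minus the trailing FCS, which is 16 bits below 4 Mb/s and 32 bits at 4 Mb/s, hence the 2- or 4-byte adjustment. A stand-alone sketch of the same calculation; the helper name and sample numbers are mine:

	/* Illustrative only, not part of the driver: recovered payload length
	 * as computed in smsc_ircc_dma_receive_complete(). */
	#include <stdio.h>

	static int rx_payload_len(int truesize, int dma_residue, int speed_bps)
	{
		int len = truesize - dma_residue;	/* bytes the DMA actually wrote */

		len -= (speed_bps < 4000000) ? 2 : 4;	/* strip the FCS */

		if (len < 2 || len > 2050)	/* same sanity window as the driver */
			return -1;
		return len;
	}

	int main(void)
	{
		/* e.g. a 14384-byte buffer with 12880 bytes left untouched, at 1.152 Mb/s */
		printf("%d\n", rx_payload_len(14384, 12880, 1152000));	/* 1502 */
		return 0;
	}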
1353
1354/*
1355 * Function smsc_ircc_sir_receive (self)
1356 *
1357 * Receive one frame from the infrared port
1358 *
1359 */
1360static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
1361{
1362 int boguscount = 0;
1363 int iobase;
1364
1365 IRDA_ASSERT(self != NULL, return;);
1366
1367 iobase = self->io.sir_base;
1368
1369 /*
1370 * Receive all characters in Rx FIFO, unwrap and unstuff them.
1371 * async_unwrap_char will deliver all found frames
1372 */
1373 do {
1374 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
1375 inb(iobase+UART_RX));
1376
1377		/* Make sure we don't stay here too long */
1378 if (boguscount++ > 32) {
1379 IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__);
1380 break;
1381 }
1382 } while (inb(iobase+UART_LSR) & UART_LSR_DR);
1383}
1384
1385
1386/*
1387 * Function smsc_ircc_interrupt (irq, dev_id, regs)
1388 *
1389 * An interrupt from the chip has arrived. Time to do some work
1390 *
1391 */
1392static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1393{
1394 struct net_device *dev = (struct net_device *) dev_id;
1395 struct smsc_ircc_cb *self;
1396 int iobase, iir, lcra, lsr;
1397 irqreturn_t ret = IRQ_NONE;
1398
1399 if (dev == NULL) {
1400 printk(KERN_WARNING "%s: irq %d for unknown device.\n",
1401 driver_name, irq);
1402 goto irq_ret;
1403 }
1404 self = (struct smsc_ircc_cb *) dev->priv;
1405 IRDA_ASSERT(self != NULL, return IRQ_NONE;);
1406
1407 /* Serialise the interrupt handler in various CPUs, stop Tx path */
1408 spin_lock(&self->lock);
1409
1410 /* Check if we should use the SIR interrupt handler */
1411 if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
1412 ret = smsc_ircc_interrupt_sir(dev);
1413 goto irq_ret_unlock;
1414 }
1415
1416 iobase = self->io.fir_base;
1417
1418 register_bank(iobase, 0);
1419 iir = inb(iobase+IRCC_IIR);
1420 if (iir == 0)
1421 goto irq_ret_unlock;
1422 ret = IRQ_HANDLED;
1423
1424 /* Disable interrupts */
1425 outb(0, iobase+IRCC_IER);
1426 lcra = inb(iobase+IRCC_LCR_A);
1427 lsr = inb(iobase+IRCC_LSR);
1428
1429 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir);
1430
1431 if (iir & IRCC_IIR_EOM) {
1432 if (self->io.direction == IO_RECV)
1433 smsc_ircc_dma_receive_complete(self, iobase);
1434 else
1435 smsc_ircc_dma_xmit_complete(self, iobase);
1436
1437 smsc_ircc_dma_receive(self, iobase);
1438 }
1439
1440 if (iir & IRCC_IIR_ACTIVE_FRAME) {
1441 /*printk(KERN_WARNING "%s(): Active Frame\n", __FUNCTION__);*/
1442 }
1443
1444 /* Enable interrupts again */
1445
1446 register_bank(iobase, 0);
1447 outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, iobase+IRCC_IER);
1448
1449 irq_ret_unlock:
1450 spin_unlock(&self->lock);
1451 irq_ret:
1452 return ret;
1453}
1454
1455/*
1456 * Function smsc_ircc_interrupt_sir (dev)
1457 *
1458 * Interrupt handler for SIR modes
1459 */
1460static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1461{
1462 struct smsc_ircc_cb *self = dev->priv;
1463 int boguscount = 0;
1464 int iobase;
1465 int iir, lsr;
1466
1467	/* Already locked coming here in smsc_ircc_interrupt() */
1468 /*spin_lock(&self->lock);*/
1469
1470 iobase = self->io.sir_base;
1471
1472 iir = inb(iobase+UART_IIR) & UART_IIR_ID;
1473 if (iir == 0)
1474 return IRQ_NONE;
1475 while (iir) {
1476 /* Clear interrupt */
1477 lsr = inb(iobase+UART_LSR);
1478
1479 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
1480 __FUNCTION__, iir, lsr, iobase);
1481
1482 switch (iir) {
1483 case UART_IIR_RLSI:
1484 IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
1485 break;
1486 case UART_IIR_RDI:
1487 /* Receive interrupt */
1488 smsc_ircc_sir_receive(self);
1489 break;
1490 case UART_IIR_THRI:
1491 if (lsr & UART_LSR_THRE)
1492 /* Transmitter ready for data */
1493 smsc_ircc_sir_write_wakeup(self);
1494 break;
1495 default:
1496 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
1497 __FUNCTION__, iir);
1498 break;
1499 }
1500
1501		/* Make sure we don't stay here too long */
1502 if (boguscount++ > 100)
1503 break;
1504
1505 iir = inb(iobase + UART_IIR) & UART_IIR_ID;
1506 }
1507 /*spin_unlock(&self->lock);*/
1508 return IRQ_HANDLED;
1509}
1510
1511
1512#if 0 /* unused */
1513/*
1514 * Function ircc_is_receiving (self)
1515 *
1516 *    Return TRUE if we are currently receiving a frame
1517 *
1518 */
1519static int ircc_is_receiving(struct smsc_ircc_cb *self)
1520{
1521 int status = FALSE;
1522 /* int iobase; */
1523
1524 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1525
1526 IRDA_ASSERT(self != NULL, return FALSE;);
1527
1528 IRDA_DEBUG(0, "%s: dma count = %d\n", __FUNCTION__,
1529 get_dma_residue(self->io.dma));
1530
1531 status = (self->rx_buff.state != OUTSIDE_FRAME);
1532
1533 return status;
1534}
1535#endif /* unused */
1536
1537
1538/*
1539 * Function smsc_ircc_net_open (dev)
1540 *
1541 * Start the device
1542 *
1543 */
1544static int smsc_ircc_net_open(struct net_device *dev)
1545{
1546 struct smsc_ircc_cb *self;
1547 int iobase;
1548 char hwname[16];
1549 unsigned long flags;
1550
1551 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1552
1553 IRDA_ASSERT(dev != NULL, return -1;);
1554 self = (struct smsc_ircc_cb *) dev->priv;
1555 IRDA_ASSERT(self != NULL, return 0;);
1556
1557 iobase = self->io.fir_base;
1558
1559 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
1560 (void *) dev)) {
1561 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
1562 __FUNCTION__, self->io.irq);
1563 return -EAGAIN;
1564 }
1565
1566 spin_lock_irqsave(&self->lock, flags);
1567 /*smsc_ircc_sir_start(self);*/
1568 self->io.speed = 0;
1569 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1570 spin_unlock_irqrestore(&self->lock, flags);
1571
1572 /* Give self a hardware name */
1573 /* It would be cool to offer the chip revision here - Jean II */
1574 sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base);
1575
1576 /*
1577 * Open new IrLAP layer instance, now that everything should be
1578 * initialized properly
1579 */
1580 self->irlap = irlap_open(dev, &self->qos, hwname);
1581
1582 /*
1583 * Always allocate the DMA channel after the IRQ,
1584 * and clean up on failure.
1585 */
1586 if (request_dma(self->io.dma, dev->name)) {
1587 smsc_ircc_net_close(dev);
1588
1589 IRDA_WARNING("%s(), unable to allocate DMA=%d\n",
1590 __FUNCTION__, self->io.dma);
1591 return -EAGAIN;
1592 }
1593
1594 netif_start_queue(dev);
1595
1596 return 0;
1597}
1598
1599/*
1600 * Function smsc_ircc_net_close (dev)
1601 *
1602 * Stop the device
1603 *
1604 */
1605static int smsc_ircc_net_close(struct net_device *dev)
1606{
1607 struct smsc_ircc_cb *self;
1608 int iobase;
1609
1610 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1611
1612 IRDA_ASSERT(dev != NULL, return -1;);
1613 self = (struct smsc_ircc_cb *) dev->priv;
1614 IRDA_ASSERT(self != NULL, return 0;);
1615
1616 iobase = self->io.fir_base;
1617
1618 /* Stop device */
1619 netif_stop_queue(dev);
1620
1621 /* Stop and remove instance of IrLAP */
1622 if (self->irlap)
1623 irlap_close(self->irlap);
1624 self->irlap = NULL;
1625
1626 free_irq(self->io.irq, dev);
1627
1628 disable_dma(self->io.dma);
1629
1630 free_dma(self->io.dma);
1631
1632 return 0;
1633}
1634
1635
1636static void smsc_ircc_suspend(struct smsc_ircc_cb *self)
1637{
1638 IRDA_MESSAGE("%s, Suspending\n", driver_name);
1639
1640 if (self->io.suspended)
1641 return;
1642
1643 smsc_ircc_net_close(self->netdev);
1644
1645 self->io.suspended = 1;
1646}
1647
1648static void smsc_ircc_wakeup(struct smsc_ircc_cb *self)
1649{
1650 if (!self->io.suspended)
1651 return;
1652
1653 /* The code was doing a "cli()" here, but this can't be right.
1654 * If you need protection, do it in net_open with a spinlock
1655 * or give a good reason. - Jean II */
1656
1657 smsc_ircc_net_open(self->netdev);
1658
1659 IRDA_MESSAGE("%s, Waking up\n", driver_name);
1660}
1661
1662static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data)
1663{
1664 struct smsc_ircc_cb *self = (struct smsc_ircc_cb*) dev->data;
1665 if (self) {
1666 switch (rqst) {
1667 case PM_SUSPEND:
1668 smsc_ircc_suspend(self);
1669 break;
1670 case PM_RESUME:
1671 smsc_ircc_wakeup(self);
1672 break;
1673 }
1674 }
1675 return 0;
1676}
1677
1678/*
1679 * Function smsc_ircc_close (self)
1680 *
1681 * Close driver instance
1682 *
1683 */
1684static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1685{
1686 int iobase;
1687 unsigned long flags;
1688
1689 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1690
1691 IRDA_ASSERT(self != NULL, return -1;);
1692
1693 iobase = self->io.fir_base;
1694
1695 if (self->pmdev)
1696 pm_unregister(self->pmdev);
1697
1698 /* Remove netdevice */
1699 unregister_netdev(self->netdev);
1700
1701	/* Make sure the irq handler is not executing */
1702 spin_lock_irqsave(&self->lock, flags);
1703
1704 /* Stop interrupts */
1705 register_bank(iobase, 0);
1706 outb(0, iobase+IRCC_IER);
1707 outb(IRCC_MASTER_RESET, iobase+IRCC_MASTER);
1708 outb(0x00, iobase+IRCC_MASTER);
1709#if 0
1710 /* Reset to SIR mode */
1711 register_bank(iobase, 1);
1712 outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase+IRCC_SCE_CFGA);
1713 outb(IRCC_CFGB_IR, iobase+IRCC_SCE_CFGB);
1714#endif
1715 spin_unlock_irqrestore(&self->lock, flags);
1716
1717 /* Release the PORTS that this driver is using */
1718 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
1719 self->io.fir_base);
1720
1721 release_region(self->io.fir_base, self->io.fir_ext);
1722
1723 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
1724 self->io.sir_base);
1725
1726 release_region(self->io.sir_base, self->io.sir_ext);
1727
1728 if (self->tx_buff.head)
1729 dma_free_coherent(NULL, self->tx_buff.truesize,
1730 self->tx_buff.head, self->tx_buff_dma);
1731
1732 if (self->rx_buff.head)
1733 dma_free_coherent(NULL, self->rx_buff.truesize,
1734 self->rx_buff.head, self->rx_buff_dma);
1735
1736 free_netdev(self->netdev);
1737
1738 return 0;
1739}
1740
1741static void __exit smsc_ircc_cleanup(void)
1742{
1743 int i;
1744
1745 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1746
1747 for (i=0; i < 2; i++) {
1748 if (dev_self[i])
1749 smsc_ircc_close(dev_self[i]);
1750 }
1751}
1752
1753/*
1754 * Start SIR operations
1755 *
1756 * This function *must* be called with spinlock held, because it may
1757 * be called from the irq handler (via smsc_ircc_change_speed()). - Jean II
1758 */
1759void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
1760{
1761 struct net_device *dev;
1762 int fir_base, sir_base;
1763
1764 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1765
1766 IRDA_ASSERT(self != NULL, return;);
1767 dev= self->netdev;
1768 IRDA_ASSERT(dev != NULL, return;);
1769 dev->hard_start_xmit = &smsc_ircc_hard_xmit_sir;
1770
1771 fir_base = self->io.fir_base;
1772 sir_base = self->io.sir_base;
1773
1774 /* Reset everything */
1775 outb(IRCC_MASTER_RESET, fir_base+IRCC_MASTER);
1776
1777 #if SMSC_IRCC2_C_SIR_STOP
1778 /*smsc_ircc_sir_stop(self);*/
1779 #endif
1780
1781 register_bank(fir_base, 1);
1782 outb(((inb(fir_base+IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | IRCC_CFGA_IRDA_SIR_A), fir_base+IRCC_SCE_CFGA);
1783
1784 /* Initialize UART */
1785 outb(UART_LCR_WLEN8, sir_base+UART_LCR); /* Reset DLAB */
1786 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), sir_base+UART_MCR);
1787
1788	/* Turn on interrupts */
1789 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base+UART_IER);
1790
1791 IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__);
1792
1793 outb(0x00, fir_base+IRCC_MASTER);
1794}
1795
1796#if SMSC_IRCC2_C_SIR_STOP
1797void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
1798{
1799 int iobase;
1800
1801 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1802 iobase = self->io.sir_base;
1803
1804 /* Reset UART */
1805 outb(0, iobase+UART_MCR);
1806
1807 /* Turn off interrupts */
1808 outb(0, iobase+UART_IER);
1809}
1810#endif
1811
1812/*
1813 * Function smsc_ircc_sir_write_wakeup (self)
1814 *
1815 * Called by the SIR interrupt handler when there's room for more data.
1816 * If we have more packets to send, we send them here.
1817 *
1818 */
1819static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1820{
1821 int actual = 0;
1822 int iobase;
1823 int fcr;
1824
1825 IRDA_ASSERT(self != NULL, return;);
1826
1827 IRDA_DEBUG(4, "%s\n", __FUNCTION__);
1828
1829 iobase = self->io.sir_base;
1830
1831 /* Finished with frame? */
1832 if (self->tx_buff.len > 0) {
1833 /* Write data left in transmit buffer */
1834 actual = smsc_ircc_sir_write(iobase, self->io.fifo_size,
1835 self->tx_buff.data, self->tx_buff.len);
1836 self->tx_buff.data += actual;
1837 self->tx_buff.len -= actual;
1838 } else {
1839
1840 /*if (self->tx_buff.len ==0) {*/
1841
1842 /*
1843		 * Now the serial buffer is almost free and we can start
1844		 * transmitting another packet. But first we must check
1845		 * whether we need to change the speed of the hardware
1846 */
1847 if (self->new_speed) {
1848 IRDA_DEBUG(5, "%s(), Changing speed to %d.\n",
1849 __FUNCTION__, self->new_speed);
1850 smsc_ircc_sir_wait_hw_transmitter_finish(self);
1851 smsc_ircc_change_speed(self, self->new_speed);
1852 self->new_speed = 0;
1853 } else {
1854 /* Tell network layer that we want more frames */
1855 netif_wake_queue(self->netdev);
1856 }
1857 self->stats.tx_packets++;
1858
1859 if(self->io.speed <= 115200) {
1860 /*
1861 * Reset Rx FIFO to make sure that all reflected transmit data
1862 * is discarded. This is needed for half duplex operation
1863 */
1864 fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR;
1865 if (self->io.speed < 38400)
1866 fcr |= UART_FCR_TRIGGER_1;
1867 else
1868 fcr |= UART_FCR_TRIGGER_14;
1869
1870 outb(fcr, iobase+UART_FCR);
1871
1872 /* Turn on receive interrupts */
1873 outb(UART_IER_RDI, iobase+UART_IER);
1874 }
1875 }
1876}
1877
1878/*
1879 * Function smsc_ircc_sir_write (iobase, fifo_size, buf, len)
1880 *
1881 * Fill Tx FIFO with transmit data
1882 *
1883 */
1884static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1885{
1886 int actual = 0;
1887
1888 /* Tx FIFO should be empty! */
1889 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
1890 IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__);
1891 return 0;
1892 }
1893
1894 /* Fill FIFO with current frame */
1895 while ((fifo_size-- > 0) && (actual < len)) {
1896 /* Transmit next byte */
1897 outb(buf[actual], iobase+UART_TX);
1898 actual++;
1899 }
1900 return actual;
1901}
1902
1903/*
1904 * Function smsc_ircc_is_receiving (self)
1905 *
1906 *    Returns TRUE if we are currently receiving data
1907 *
1908 */
1909static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
1910{
1911 return (self->rx_buff.state != OUTSIDE_FRAME);
1912}
1913
1914
1915/*
1916 * Function smsc_ircc_probe_transceiver(self)
1917 *
1918 *    Tries to find the transceiver in use
1919 *
1920 */
1921static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
1922{
1923 unsigned int i;
1924
1925 IRDA_ASSERT(self != NULL, return;);
1926
1927 for(i=0; smsc_transceivers[i].name!=NULL; i++)
1928 if((*smsc_transceivers[i].probe)(self->io.fir_base)) {
1929 IRDA_MESSAGE(" %s transceiver found\n",
1930 smsc_transceivers[i].name);
1931 self->transceiver= i+1;
1932 return;
1933 }
1934 IRDA_MESSAGE("No transceiver found. Defaulting to %s\n",
1935 smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
1936
1937 self->transceiver= SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
1938}
1939
1940
1941/*
1942 * Function smsc_ircc_set_transceiver_for_speed(self, speed)
1943 *
1944 * Set the transceiver according to the speed
1945 *
1946 */
1947static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed)
1948{
1949 unsigned int trx;
1950
1951 trx = self->transceiver;
1952 if(trx>0) (*smsc_transceivers[trx-1].set_for_speed)(self->io.fir_base, speed);
1953}
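The smsc_transceivers[] table itself is defined earlier in the file and is not shown here; from the way the two functions above use it, each entry pairs a name with probe and set_for_speed callbacks, the array is terminated by a NULL name, and self->transceiver stores index + 1 so that 0 can mean "none". A hedged sketch of that inferred shape; the struct and helper names are mine and the real definition may differ in detail:

	/* Inferred from usage only; the real table lives elsewhere in smsc-ircc2.c. */
	#include <stddef.h>

	struct smsc_transceiver_sketch {
		const char *name;			/* NULL name terminates the table */
		int (*probe)(int fir_base);		/* nonzero: transceiver present */
		void (*set_for_speed)(int fir_base, unsigned int speed);
	};

	static int probe_sketch(const struct smsc_transceiver_sketch *tbl, int fir_base)
	{
		int i;

		for (i = 0; tbl[i].name != NULL; i++)
			if (tbl[i].probe(fir_base))
				return i + 1;	/* stored as index + 1, 0 = none found */
		return 0;
	}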
1954
1955/*
1956 * Function smsc_ircc_sir_wait_hw_transmitter_finish (self)
1957 *
1958 * Wait for the real end of HW transmission
1959 *
1960 * The UART is a strict FIFO, and we get called only when we have finished
1961 * pushing data to the FIFO, so the maximum amount of time we must wait
1962 * is only for the FIFO to drain out.
1963 *
1964 * We use a simple calibrated loop. We may need to adjust the loop
1965 * delay (udelay) to balance I/O traffic and latency. And we also need to
1966 * adjust the maximum timeout.
1967 * It would probably be better to wait for the proper interrupt,
1968 * but it doesn't seem to be available.
1969 *
1970 * We can't use jiffies or kernel timers because :
1971 * 1) We are called from the interrupt handler, which disables softirqs,
1972 * so jiffies won't be increased
1973 * 2) Jiffies granularity is usually very coarse (10ms), and we don't
1974 * want to wait that long to detect stuck hardware.
1975 * Jean II
1976 */
1977
1978static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
1979{
1980 int iobase;
1981 int count = SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US;
1982
1983 iobase = self->io.sir_base;
1984
1985 /* Calibrated busy loop */
1986 while((count-- > 0) && !(inb(iobase+UART_LSR) & UART_LSR_TEMT))
1987 udelay(1);
1988
1989 if(count == 0)
1990 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__);
1991}
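The wait above is bounded by simple serial arithmetic: an 8N1 character occupies 10 bit times, so draining n pending characters takes n * 10 / baud seconds. A stand-alone sketch of that calculation (the helper name is mine); note that at 9600 bps a single character already needs slightly more than the 1000 us allowed by SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US, so an expiry at the lowest rates is possible and is only reported through IRDA_DEBUG.

	/* Illustrative only, not part of the driver: UART drain time for
	 * n pending 8N1 characters (10 bit times each) at a given rate. */
	#include <stdio.h>

	static unsigned int drain_time_us(unsigned int chars, unsigned int baud)
	{
		return chars * 10u * 1000000u / baud;
	}

	int main(void)
	{
		printf("%u\n", drain_time_us(1, 115200));	/* ~86 us */
		printf("%u\n", drain_time_us(2, 115200));	/* ~173 us */
		printf("%u\n", drain_time_us(1, 9600));		/* ~1041 us */
		return 0;
	}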
1992
1993
1994/* PROBING
1995 *
1996 *
1997 */
1998
1999static int __init smsc_ircc_look_for_chips(void)
2000{
2001 smsc_chip_address_t *address;
2002 char *type;
2003 unsigned int cfg_base, found;
2004
2005 found = 0;
2006 address = possible_addresses;
2007
2008 while(address->cfg_base){
2009 cfg_base = address->cfg_base;
2010
2011 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/
2012
2013 if( address->type & SMSCSIO_TYPE_FDC){
2014 type = "FDC";
2015 if((address->type) & SMSCSIO_TYPE_FLAT) {
2016 if(!smsc_superio_flat(fdc_chips_flat,cfg_base, type)) found++;
2017 }
2018 if((address->type) & SMSCSIO_TYPE_PAGED) {
2019 if(!smsc_superio_paged(fdc_chips_paged,cfg_base, type)) found++;
2020 }
2021 }
2022 if( address->type & SMSCSIO_TYPE_LPC){
2023 type = "LPC";
2024 if((address->type) & SMSCSIO_TYPE_FLAT) {
2025 if(!smsc_superio_flat(lpc_chips_flat,cfg_base,type)) found++;
2026 }
2027 if((address->type) & SMSCSIO_TYPE_PAGED) {
2028 if(!smsc_superio_paged(lpc_chips_paged,cfg_base,"LPC")) found++;
2029 }
2030 }
2031 address++;
2032 }
2033 return found;
2034}
2035
2036/*
2037 * Function smsc_superio_flat (chip, base, type)
2038 *
2039 *    Try to read the configuration of an SMC SuperIO chip with a flat register model
2040 *
2041 */
2042static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfgbase, char *type)
2043{
2044 unsigned short firbase, sirbase;
2045 u8 mode, dma, irq;
2046 int ret = -ENODEV;
2047
2048 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2049
2050 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type)==NULL)
2051 return ret;
2052
2053 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
2054 mode = inb(cfgbase+1);
2055
2056 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/
2057
2058 if(!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
2059 IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__);
2060
2061 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
2062 sirbase = inb(cfgbase+1) << 2;
2063
2064 /* FIR iobase */
2065 outb(SMSCSIOFLAT_FIRBASEADDR_REG, cfgbase);
2066 firbase = inb(cfgbase+1) << 3;
2067
2068 /* DMA */
2069 outb(SMSCSIOFLAT_FIRDMASELECT_REG, cfgbase);
2070 dma = inb(cfgbase+1) & SMSCSIOFLAT_FIRDMASELECT_MASK;
2071
2072 /* IRQ */
2073 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
2074 irq = inb(cfgbase+1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
2075
2076 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode);
2077
2078 if (firbase) {
2079 if (smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
2080 ret=0;
2081 }
2082
2083 /* Exit configuration */
2084 outb(SMSCSIO_CFGEXITKEY, cfgbase);
2085
2086 return ret;
2087}
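In the flat register model the base addresses are stored pre-shifted: the UART2 base register holds the SIR address divided by 4 and the FIR base register holds the FIR address divided by 8, which is why the probe shifts them back by 2 and 3 bits. A stand-alone sketch with hypothetical register contents, not part of the driver:

	/* Illustrative only, not part of the driver: expanding hypothetical
	 * raw SuperIO register values the way smsc_superio_flat() does. */
	#include <stdio.h>

	int main(void)
	{
		unsigned char uart2_reg = 0xbe;	/* hypothetical contents of reg 0x25 */
		unsigned char fir_reg = 0x5c;	/* hypothetical contents of reg 0x2b */

		unsigned int sir_base = uart2_reg << 2;	/* 0xbe << 2 = 0x2f8 */
		unsigned int fir_base = fir_reg << 3;	/* 0x5c << 3 = 0x2e0 */

		printf("sir 0x%03x fir 0x%03x\n", sir_base, fir_base);
		return 0;
	}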
2088
2089/*
2090 * Function smsc_superio_paged (chip, base, type)
2091 *
2092 *    Try to read the configuration of an SMC SuperIO chip with a paged register model
2093 *
2094 */
2095static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cfg_base, char *type)
2096{
2097 unsigned short fir_io, sir_io;
2098 int ret = -ENODEV;
2099
2100 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2101
2102 if (smsc_ircc_probe(cfg_base,0x20,chips,type)==NULL)
2103 return ret;
2104
2105 /* Select logical device (UART2) */
2106 outb(0x07, cfg_base);
2107 outb(0x05, cfg_base + 1);
2108
2109 /* SIR iobase */
2110 outb(0x60, cfg_base);
2111 sir_io = inb(cfg_base + 1) << 8;
2112 outb(0x61, cfg_base);
2113 sir_io |= inb(cfg_base + 1);
2114
2115 /* Read FIR base */
2116 outb(0x62, cfg_base);
2117 fir_io = inb(cfg_base + 1) << 8;
2118 outb(0x63, cfg_base);
2119 fir_io |= inb(cfg_base + 1);
2120 outb(0x2b, cfg_base); /* ??? */
2121
2122 if (fir_io) {
2123 if (smsc_ircc_open(fir_io, sir_io, ircc_dma, ircc_irq) == 0)
2124 ret=0;
2125 }
2126
2127 /* Exit configuration */
2128 outb(SMSCSIO_CFGEXITKEY, cfg_base);
2129
2130 return ret;
2131}
2132
2133
2134static int __init smsc_access(unsigned short cfg_base,unsigned char reg)
2135{
2136 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2137
2138 outb(reg, cfg_base);
2139
2140 if (inb(cfg_base)!=reg)
2141 return -1;
2142
2143 return 0;
2144}
2145
2146static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg,const smsc_chip_t *chip,char *type)
2147{
2148 u8 devid,xdevid,rev;
2149
2150 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2151
2152 /* Leave configuration */
2153
2154 outb(SMSCSIO_CFGEXITKEY, cfg_base);
2155
2156 if (inb(cfg_base) == SMSCSIO_CFGEXITKEY) /* not a smc superio chip */
2157 return NULL;
2158
2159 outb(reg, cfg_base);
2160
2161 xdevid=inb(cfg_base+1);
2162
2163 /* Enter configuration */
2164
2165 outb(SMSCSIO_CFGACCESSKEY, cfg_base);
2166
2167 #if 0
2168 if (smsc_access(cfg_base,0x55)) /* send second key and check */
2169 return NULL;
2170 #endif
2171
2172 /* probe device ID */
2173
2174 if (smsc_access(cfg_base,reg))
2175 return NULL;
2176
2177 devid=inb(cfg_base+1);
2178
2179 if (devid==0) /* typical value for unused port */
2180 return NULL;
2181
2182 if (devid==0xff) /* typical value for unused port */
2183 return NULL;
2184
2185 /* probe revision ID */
2186
2187 if (smsc_access(cfg_base,reg+1))
2188 return NULL;
2189
2190 rev=inb(cfg_base+1);
2191
2192	if (rev>=128)		/* a revision this high makes no sense */
2193 return NULL;
2194
2195 if (devid==xdevid) /* protection against false positives */
2196 return NULL;
2197
2198 /* Check for expected device ID; are there others? */
2199
2200 while(chip->devid!=devid) {
2201
2202 chip++;
2203
2204 if (chip->name==NULL)
2205 return NULL;
2206 }
2207
2208 IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",devid,rev,cfg_base,type,chip->name);
2209
2210 if (chip->rev>rev){
2211 IRDA_MESSAGE("Revision higher than expected\n");
2212 return NULL;
2213 }
2214
2215 if (chip->flags&NoIRDA)
2216 IRDA_MESSAGE("chipset does not support IRDA\n");
2217
2218 return chip;
2219}
2220
2221static int __init smsc_superio_fdc(unsigned short cfg_base)
2222{
2223 int ret = -1;
2224
2225 if (!request_region(cfg_base, 2, driver_name)) {
2226 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2227 __FUNCTION__, cfg_base);
2228 } else {
2229 if (!smsc_superio_flat(fdc_chips_flat,cfg_base,"FDC")
2230 ||!smsc_superio_paged(fdc_chips_paged,cfg_base,"FDC"))
2231 ret = 0;
2232
2233 release_region(cfg_base, 2);
2234 }
2235
2236 return ret;
2237}
2238
2239static int __init smsc_superio_lpc(unsigned short cfg_base)
2240{
2241 int ret = -1;
2242
2243 if (!request_region(cfg_base, 2, driver_name)) {
2244 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2245 __FUNCTION__, cfg_base);
2246 } else {
2247 if (!smsc_superio_flat(lpc_chips_flat,cfg_base,"LPC")
2248 ||!smsc_superio_paged(lpc_chips_paged,cfg_base,"LPC"))
2249 ret = 0;
2250 release_region(cfg_base, 2);
2251 }
2252 return ret;
2253}
2254
2255/************************************************
2256 *
2257 * Transceivers specific functions
2258 *
2259 ************************************************/
2260
2261
2262/*
2263 * Function smsc_ircc_set_transceiver_smsc_ircc_atc(fir_base, speed)
2264 *
2265 * Program transceiver through smsc-ircc ATC circuitry
2266 *
2267 */
2268
2269static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
2270{
2271 unsigned long jiffies_now, jiffies_timeout;
2272 u8 val;
2273
2274 jiffies_now= jiffies;
2275 jiffies_timeout= jiffies+SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES;
2276
2277 /* ATC */
2278 register_bank(fir_base, 4);
2279 outb((inb(fir_base+IRCC_ATC) & IRCC_ATC_MASK) |IRCC_ATC_nPROGREADY|IRCC_ATC_ENABLE, fir_base+IRCC_ATC);
2280 while((val=(inb(fir_base+IRCC_ATC) & IRCC_ATC_nPROGREADY)) && !time_after(jiffies, jiffies_timeout));
2281 if(val)
2282 IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__,
2283 inb(fir_base+IRCC_ATC));
2284}
2285
2286/*
2287 * Function smsc_ircc_probe_transceiver_smsc_ircc_atc(fir_base)
2288 *
2289 * Probe transceiver smsc-ircc ATC circuitry
2290 *
2291 */
2292
2293static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base)
2294{
2295 return 0;
2296}
2297
2298/*
2299 * Function smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(self, speed)
2300 *
2301 * Set transceiver
2302 *
2303 */
2304
2305static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed)
2306{
2307 u8 fast_mode;
2308
2309 switch(speed)
2310 {
2311 default:
2312 case 576000 :
2313 fast_mode = 0;
2314 break;
2315 case 1152000 :
2316 case 4000000 :
2317 fast_mode = IRCC_LCR_A_FAST;
2318 break;
2319
2320 }
2321 register_bank(fir_base, 0);
2322 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast_mode, fir_base+IRCC_LCR_A);
2323}
2324
2325/*
2326 * Function smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(fir_base)
2327 *
2328 * Probe transceiver
2329 *
2330 */
2331
2332static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base)
2333{
2334 return 0;
2335}
2336
2337/*
2338 * Function smsc_ircc_set_transceiver_toshiba_sat1800(fir_base, speed)
2339 *
2340 * Set transceiver
2341 *
2342 */
2343
2344static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed)
2345{
2346 u8 fast_mode;
2347
2348 switch(speed)
2349 {
2350 default:
2351 case 576000 :
2352 fast_mode = 0;
2353 break;
2354 case 1152000 :
2355 case 4000000 :
2356 fast_mode = /*IRCC_LCR_A_FAST |*/ IRCC_LCR_A_GP_DATA;
2357 break;
2358
2359 }
2360 /* This causes an interrupt */
2361 register_bank(fir_base, 0);
2362 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast_mode, fir_base+IRCC_LCR_A);
2363}
2364
2365/*
2366 * Function smsc_ircc_probe_transceiver_toshiba_sat1800(fir_base)
2367 *
2368 * Probe transceiver
2369 *
2370 */
2371
2372static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base)
2373{
2374 return 0;
2375}
2376
2377
2378module_init(smsc_ircc_init);
2379module_exit(smsc_ircc_cleanup);
2380
2381MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
2382MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
2383MODULE_LICENSE("GPL");
2384
2385module_param(ircc_dma, int, 0);
2386MODULE_PARM_DESC(ircc_dma, "DMA channel");
2387module_param(ircc_irq, int, 0);
2388MODULE_PARM_DESC(ircc_irq, "IRQ line");
2389module_param(ircc_fir, int, 0);
2390MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
2391module_param(ircc_sir, int, 0);
2392MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
2393module_param(ircc_cfg, int, 0);
2394MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
2395module_param(ircc_transceiver, int, 0);
2396MODULE_PARM_DESC(ircc_transceiver, "Transceiver type");
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
new file mode 100644
index 000000000000..458611cc0d40
--- /dev/null
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -0,0 +1,194 @@
1/*********************************************************************
2 * $Id: smsc-ircc2.h,v 1.12.2.1 2002/10/27 10:52:37 dip Exp $
3 *
4 * Description: Definitions for the SMC IrCC chipset
5 * Status: Experimental.
6 * Author: Daniele Peri (peri@csai.unipa.it)
7 *
8 * Copyright (c) 2002 Daniele Peri
9 * All Rights Reserved.
10 *
11 * Based on smc-ircc.h:
12 *
13 * Copyright (c) 1999-2000, Dag Brattli <dagb@cs.uit.no>
 14 *      Copyright (c) 1998-1999, Thomas Davis <tadavis@jps.net>
15 * All Rights Reserved
16 *
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License as
20 * published by the Free Software Foundation; either version 2 of
21 * the License, or (at your option) any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
31 * MA 02111-1307 USA
32 *
33 ********************************************************************/
34
35#ifndef SMSC_IRCC2_H
36#define SMSC_IRCC2_H
37
38/* DMA modes needed */
39#define DMA_TX_MODE 0x08 /* Mem to I/O, ++, demand. */
40#define DMA_RX_MODE 0x04 /* I/O to mem, ++, demand. */
41
42/* Master Control Register */
43#define IRCC_MASTER 0x07
44#define IRCC_MASTER_POWERDOWN 0x80
45#define IRCC_MASTER_RESET 0x40
46#define IRCC_MASTER_INT_EN 0x20
47#define IRCC_MASTER_ERROR_RESET 0x10
48
49/* Register block 0 */
50
51/* Interrupt Identification */
52#define IRCC_IIR 0x01
53#define IRCC_IIR_ACTIVE_FRAME 0x80
54#define IRCC_IIR_EOM 0x40
55#define IRCC_IIR_RAW_MODE 0x20
56#define IRCC_IIR_FIFO 0x10
57
58/* Interrupt Enable */
59#define IRCC_IER 0x02
60#define IRCC_IER_ACTIVE_FRAME 0x80
61#define IRCC_IER_EOM 0x40
62#define IRCC_IER_RAW_MODE 0x20
63#define IRCC_IER_FIFO 0x10
64
65/* Line Status Register */
66#define IRCC_LSR 0x03
67#define IRCC_LSR_UNDERRUN 0x80
68#define IRCC_LSR_OVERRUN 0x40
69#define IRCC_LSR_FRAME_ERROR 0x20
70#define IRCC_LSR_SIZE_ERROR 0x10
71#define IRCC_LSR_CRC_ERROR 0x80
72#define IRCC_LSR_FRAME_ABORT 0x40
73
74/* Line Status Address Register */
75#define IRCC_LSAR 0x03
76#define IRCC_LSAR_ADDRESS_MASK 0x07
77
78/* Line Control Register A */
79#define IRCC_LCR_A 0x04
80#define IRCC_LCR_A_FIFO_RESET 0x80
81#define IRCC_LCR_A_FAST 0x40
82#define IRCC_LCR_A_GP_DATA 0x20
83#define IRCC_LCR_A_RAW_TX 0x10
84#define IRCC_LCR_A_RAW_RX 0x08
85#define IRCC_LCR_A_ABORT 0x04
86#define IRCC_LCR_A_DATA_DONE 0x02
87
88/* Line Control Register B */
89#define IRCC_LCR_B 0x05
90#define IRCC_LCR_B_SCE_DISABLED 0x00
91#define IRCC_LCR_B_SCE_TRANSMIT 0x40
92#define IRCC_LCR_B_SCE_RECEIVE 0x80
93#define IRCC_LCR_B_SCE_UNDEFINED 0xc0
94#define IRCC_LCR_B_SIP_ENABLE 0x20
95#define IRCC_LCR_B_BRICK_WALL 0x10
96
97/* Bus Status Register */
98#define IRCC_BSR 0x06
99#define IRCC_BSR_NOT_EMPTY 0x80
100#define IRCC_BSR_FIFO_FULL 0x40
101#define IRCC_BSR_TIMEOUT 0x20
102
103/* Register block 1 */
104
105#define IRCC_FIFO_THRESHOLD 0x02
106
107#define IRCC_SCE_CFGA 0x00
108#define IRCC_CFGA_AUX_IR 0x80
109#define IRCC_CFGA_HALF_DUPLEX 0x04
110#define IRCC_CFGA_TX_POLARITY 0x02
111#define IRCC_CFGA_RX_POLARITY 0x01
112
113#define IRCC_CFGA_COM 0x00
114#define IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK 0x87
115#define IRCC_CFGA_IRDA_SIR_A 0x08
116#define IRCC_CFGA_ASK_SIR 0x10
117#define IRCC_CFGA_IRDA_SIR_B 0x18
118#define IRCC_CFGA_IRDA_HDLC 0x20
119#define IRCC_CFGA_IRDA_4PPM 0x28
120#define IRCC_CFGA_CONSUMER 0x30
121#define IRCC_CFGA_RAW_IR 0x38
122#define IRCC_CFGA_OTHER 0x40
123
124#define IRCC_IR_HDLC 0x04
125#define IRCC_IR_4PPM 0x01
126#define IRCC_IR_CONSUMER 0x02
127
128#define IRCC_SCE_CFGB 0x01
129#define IRCC_CFGB_LOOPBACK 0x20
130#define IRCC_CFGB_LPBCK_TX_CRC 0x10
131#define IRCC_CFGB_NOWAIT 0x08
132#define IRCC_CFGB_STRING_MOVE 0x04
133#define IRCC_CFGB_DMA_BURST 0x02
134#define IRCC_CFGB_DMA_ENABLE 0x01
135
136#define IRCC_CFGB_MUX_COM 0x00
137#define IRCC_CFGB_MUX_IR 0x40
138#define IRCC_CFGB_MUX_AUX 0x80
139#define IRCC_CFGB_MUX_INACTIVE 0xc0
140
141/* Register block 3 - Identification Registers! */
142#define IRCC_ID_HIGH 0x00 /* 0x10 */
143#define IRCC_ID_LOW 0x01 /* 0xB8 */
144#define IRCC_CHIP_ID 0x02 /* 0xF1 */
145#define IRCC_VERSION 0x03 /* 0x01 */
146#define IRCC_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */
147#define IRCC_INTERFACE_DMA_MASK 0x0F /* low 4 = DMA, high 4 = IRQ */
148#define IRCC_INTERFACE_IRQ_MASK 0xF0 /* low 4 = DMA, high 4 = IRQ */
149
150/* Register block 4 - IrDA */
151#define IRCC_CONTROL 0x00
152#define IRCC_BOF_COUNT_LO 0x01 /* Low byte */
153#define IRCC_BOF_COUNT_HI 0x00 /* High nibble (bit 0-3) */
154#define IRCC_BRICKWALL_CNT_LO 0x02 /* Low byte */
155#define IRCC_BRICKWALL_CNT_HI 0x03 /* High nibble (bit 4-7) */
156#define IRCC_TX_SIZE_LO 0x04 /* Low byte */
157#define IRCC_TX_SIZE_HI 0x03 /* High nibble (bit 0-3) */
158#define IRCC_RX_SIZE_HI 0x05 /* High nibble (bit 0-3) */
159#define IRCC_RX_SIZE_LO 0x06 /* Low byte */
160
161#define IRCC_1152 0x80
162#define IRCC_CRC 0x40
163
164/* Register block 5 - IrDA */
165#define IRCC_ATC 0x00
166#define IRCC_ATC_nPROGREADY 0x80
167#define IRCC_ATC_SPEED 0x40
168#define IRCC_ATC_ENABLE 0x20
169#define IRCC_ATC_MASK 0xE0
170
171
172#define IRCC_IRHALFDUPLEX_TIMEOUT 0x01
173
174#define IRCC_SCE_TX_DELAY_TIMER 0x02
175
176/*
177 * Other definitions
178 */
179
180#define SMSC_IRCC2_MAX_SIR_SPEED 115200
181#define SMSC_IRCC2_FIR_CHIP_IO_EXTENT 8
182#define SMSC_IRCC2_SIR_CHIP_IO_EXTENT 8
183#define SMSC_IRCC2_FIFO_SIZE 16
184#define SMSC_IRCC2_FIFO_THRESHOLD 64
185/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
186#define SMSC_IRCC2_RX_BUFF_TRUESIZE 14384
187#define SMSC_IRCC2_TX_BUFF_TRUESIZE 14384
188#define SMSC_IRCC2_MIN_TURN_TIME 0x07
189#define SMSC_IRCC2_WINDOW_SIZE 0x07
190/* Maximum wait for hw transmitter to finish */
191#define SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US 1000 /* 1 ms */
192/* Maximum wait for ATC transceiver programming to finish */
193#define SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES 1
194#endif /* SMSC_IRCC2_H */
diff --git a/drivers/net/irda/smsc-sio.h b/drivers/net/irda/smsc-sio.h
new file mode 100644
index 000000000000..59e20e653ebe
--- /dev/null
+++ b/drivers/net/irda/smsc-sio.h
@@ -0,0 +1,100 @@
1#ifndef SMSC_SIO_H
2#define SMSC_SIO_H
3
4/******************************************
5 Keys. They should work with every SMsC SIO
6 ******************************************/
7
8#define SMSCSIO_CFGACCESSKEY 0x55
9#define SMSCSIO_CFGEXITKEY 0xaa
10
11/*****************************
12 * Generic SIO Flat (!?) *
13 *****************************/
14
15/* Register 0x0d */
16#define SMSCSIOFLAT_DEVICEID_REG 0x0d
17
18/* Register 0x0c */
19#define SMSCSIOFLAT_UARTMODE0C_REG 0x0c
20#define SMSCSIOFLAT_UART2MODE_MASK 0x38
21#define SMSCSIOFLAT_UART2MODE_VAL_COM 0x00
22#define SMSCSIOFLAT_UART2MODE_VAL_IRDA 0x08
23#define SMSCSIOFLAT_UART2MODE_VAL_ASKIR 0x10
24
25/* Register 0x25 */
26#define SMSCSIOFLAT_UART2BASEADDR_REG 0x25
27
28/* Register 0x2b */
29#define SMSCSIOFLAT_FIRBASEADDR_REG 0x2b
30
31/* Register 0x2c */
32#define SMSCSIOFLAT_FIRDMASELECT_REG 0x2c
33#define SMSCSIOFLAT_FIRDMASELECT_MASK 0x0f
34
35/* Register 0x28 */
36#define SMSCSIOFLAT_UARTIRQSELECT_REG 0x28
37#define SMSCSIOFLAT_UART2IRQSELECT_MASK 0x0f
38#define SMSCSIOFLAT_UART1IRQSELECT_MASK 0xf0
39#define SMSCSIOFLAT_UARTIRQSELECT_VAL_NONE 0x00
40
41
42/*********************
43 * LPC47N227 *
44 *********************/
45
46#define LPC47N227_CFGACCESSKEY 0x55
47#define LPC47N227_CFGEXITKEY 0xaa
48
49/* Register 0x00 */
50#define LPC47N227_FDCPOWERVALIDCONF_REG 0x00
51#define LPC47N227_FDCPOWER_MASK 0x08
52#define LPC47N227_VALID_MASK 0x80
53
54/* Register 0x02 */
55#define LPC47N227_UART12POWER_REG 0x02
56#define LPC47N227_UART1POWERDOWN_MASK 0x08
57#define LPC47N227_UART2POWERDOWN_MASK 0x80
58
59/* Register 0x07 */
60#define LPC47N227_APMBOOTDRIVE_REG 0x07
61#define LPC47N227_PARPORT2AUTOPWRDOWN_MASK 0x10 /* auto power down on if set */
62#define LPC47N227_UART2AUTOPWRDOWN_MASK 0x20 /* auto power down on if set */
63#define LPC47N227_UART1AUTOPWRDOWN_MASK 0x40 /* auto power down on if set */
64
65/* Register 0x0c */
66#define LPC47N227_UARTMODE0C_REG 0x0c
67#define LPC47N227_UART2MODE_MASK 0x38
68#define LPC47N227_UART2MODE_VAL_COM 0x00
69#define LPC47N227_UART2MODE_VAL_IRDA 0x08
70#define LPC47N227_UART2MODE_VAL_ASKIR 0x10
71
72/* Register 0x0d */
73#define LPC47N227_DEVICEID_REG 0x0d
74#define LPC47N227_DEVICEID_DEFVAL 0x5a
75
76/* Register 0x0e */
77#define LPC47N227_REVISIONID_REG 0x0e
78
79/* Register 0x25 */
80#define LPC47N227_UART2BASEADDR_REG 0x25
81
82/* Register 0x28 */
83#define LPC47N227_UARTIRQSELECT_REG 0x28
84#define LPC47N227_UART2IRQSELECT_MASK 0x0f
85#define LPC47N227_UART1IRQSELECT_MASK 0xf0
86#define LPC47N227_UARTIRQSELECT_VAL_NONE 0x00
87
88/* Register 0x2b */
89#define LPC47N227_FIRBASEADDR_REG 0x2b
90
91/* Register 0x2c */
92#define LPC47N227_FIRDMASELECT_REG 0x2c
93#define LPC47N227_FIRDMASELECT_MASK 0x0f
94#define LPC47N227_FIRDMASELECT_VAL_DMA1 0x01 /* 47n227 has three dma channels */
95#define LPC47N227_FIRDMASELECT_VAL_DMA2 0x02
96#define LPC47N227_FIRDMASELECT_VAL_DMA3 0x03
97#define LPC47N227_FIRDMASELECT_VAL_NONE 0x0f
98
99
100#endif
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
new file mode 100644
index 000000000000..83c605e8824c
--- /dev/null
+++ b/drivers/net/irda/stir4200.c
@@ -0,0 +1,1184 @@
1/*****************************************************************************
2*
3* Filename: stir4200.c
4* Version: 0.4
 5* Description: IrDA SigmaTel USB Dongle
6* Status: Experimental
7* Author: Stephen Hemminger <shemminger@osdl.org>
8*
9* Based on earlier driver by Paul Stewart <stewart@parc.com>
10*
11* Copyright (C) 2000, Roman Weissgaerber <weissg@vienna.at>
12* Copyright (C) 2001, Dag Brattli <dag@brattli.net>
13* Copyright (C) 2001, Jean Tourrilhes <jt@hpl.hp.com>
14* Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
15*
16* This program is free software; you can redistribute it and/or modify
17* it under the terms of the GNU General Public License as published by
18* the Free Software Foundation; either version 2 of the License, or
19* (at your option) any later version.
20*
21* This program is distributed in the hope that it will be useful,
22* but WITHOUT ANY WARRANTY; without even the implied warranty of
23* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24* GNU General Public License for more details.
25*
26* You should have received a copy of the GNU General Public License
27* along with this program; if not, write to the Free Software
28* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29*
30*****************************************************************************/
31
32/*
33 * This dongle does no framing, and requires polling to receive the
34 * data. The STIr4200 has bulk in and out endpoints just like
 35 * usb-irda devices, but the data it sends and receives is raw; like
36 * irtty, it needs to call the wrap and unwrap functions to add and
37 * remove SOF/BOF and escape characters to/from the frame.
38 */
39
40#include <linux/module.h>
41#include <linux/moduleparam.h>
42
43#include <linux/kernel.h>
44#include <linux/types.h>
45#include <linux/init.h>
46#include <linux/time.h>
47#include <linux/skbuff.h>
48#include <linux/netdevice.h>
49#include <linux/slab.h>
50#include <linux/delay.h>
51#include <linux/usb.h>
52#include <linux/crc32.h>
53#include <net/irda/irda.h>
54#include <net/irda/irlap.h>
55#include <net/irda/irda_device.h>
56#include <net/irda/wrapper.h>
57#include <net/irda/crc.h>
58#include <asm/byteorder.h>
59#include <asm/unaligned.h>
60
61MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
62MODULE_DESCRIPTION("IrDA-USB Dongle Driver for SigmaTel STIr4200");
63MODULE_LICENSE("GPL");
64
65static int qos_mtt_bits = 0x07; /* 1 ms or more */
66module_param(qos_mtt_bits, int, 0);
67MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
68
69static int rx_sensitivity = 1; /* FIR 0..4, SIR 0..6 */
70module_param(rx_sensitivity, int, 0);
71MODULE_PARM_DESC(rx_sensitivity, "Set Receiver sensitivity (0-6, 0 is most sensitive)");
72
73static int tx_power = 0; /* 0 = highest ... 3 = lowest */
74module_param(tx_power, int, 0);
75MODULE_PARM_DESC(tx_power, "Set Transmitter power (0-3, 0 is highest power)");
76
77#define STIR_IRDA_HEADER 4
78#define CTRL_TIMEOUT 100 /* milliseconds */
79#define TRANSMIT_TIMEOUT 200 /* milliseconds */
80#define STIR_FIFO_SIZE 4096
81#define FIFO_REGS_SIZE 3
82
83enum FirChars {
84 FIR_CE = 0x7d,
85 FIR_XBOF = 0x7f,
86 FIR_EOF = 0x7e,
87};
88
89enum StirRequests {
90 REQ_WRITE_REG = 0x00,
91 REQ_READ_REG = 0x01,
92 REQ_READ_ROM = 0x02,
93 REQ_WRITE_SINGLE = 0x03,
94};
95
96/* Register offsets */
97enum StirRegs {
98 REG_RSVD=0,
99 REG_MODE,
100 REG_PDCLK,
101 REG_CTRL1,
102 REG_CTRL2,
103 REG_FIFOCTL,
104 REG_FIFOLSB,
105 REG_FIFOMSB,
106 REG_DPLL,
107 REG_IRDIG,
108 REG_TEST=15,
109};
110
111enum StirModeMask {
112 MODE_FIR = 0x80,
113 MODE_SIR = 0x20,
114 MODE_ASK = 0x10,
115 MODE_FASTRX = 0x08,
116 MODE_FFRSTEN = 0x04,
117 MODE_NRESET = 0x02,
118 MODE_2400 = 0x01,
119};
120
121enum StirPdclkMask {
122 PDCLK_4000000 = 0x02,
123 PDCLK_115200 = 0x09,
124 PDCLK_57600 = 0x13,
125 PDCLK_38400 = 0x1D,
126 PDCLK_19200 = 0x3B,
127 PDCLK_9600 = 0x77,
128 PDCLK_2400 = 0xDF,
129};
130
131enum StirCtrl1Mask {
132 CTRL1_SDMODE = 0x80,
133 CTRL1_RXSLOW = 0x40,
134 CTRL1_TXPWD = 0x10,
135 CTRL1_RXPWD = 0x08,
136 CTRL1_SRESET = 0x01,
137};
138
139enum StirCtrl2Mask {
140 CTRL2_SPWIDTH = 0x08,
141 CTRL2_REVID = 0x03,
142};
143
144enum StirFifoCtlMask {
145 FIFOCTL_EOF = 0x80,
146 FIFOCTL_UNDER = 0x40,
147 FIFOCTL_OVER = 0x20,
148 FIFOCTL_DIR = 0x10,
149 FIFOCTL_CLR = 0x08,
150 FIFOCTL_EMPTY = 0x04,
151 FIFOCTL_RXERR = 0x02,
152 FIFOCTL_TXERR = 0x01,
153};
154
155enum StirDiagMask {
156 IRDIG_RXHIGH = 0x80,
157 IRDIG_RXLOW = 0x40,
158};
159
160enum StirTestMask {
161 TEST_PLLDOWN = 0x80,
162 TEST_LOOPIR = 0x40,
163 TEST_LOOPUSB = 0x20,
164 TEST_TSTENA = 0x10,
165 TEST_TSTOSC = 0x0F,
166};
167
168struct stir_cb {
169 struct usb_device *usbdev; /* init: probe_irda */
170 struct net_device *netdev; /* network layer */
 171 struct irlap_cb *irlap; /* The link layer we are bound to */
172 struct net_device_stats stats; /* network statistics */
173 struct qos_info qos;
174 unsigned speed; /* Current speed */
175
176 wait_queue_head_t thr_wait; /* transmit thread wakeup */
177 struct completion thr_exited;
178 pid_t thr_pid;
179
180 struct sk_buff *tx_pending;
181 void *io_buf; /* transmit/receive buffer */
182 __u8 *fifo_status;
183
184 iobuff_t rx_buff; /* receive unwrap state machine */
185 struct timeval rx_time;
186 int receiving;
187 struct urb *rx_urb;
188};
189
190
191/* These are the currently known USB ids */
192static struct usb_device_id dongles[] = {
193 /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */
194 { USB_DEVICE(0x066f, 0x4200) },
195 { }
196};
197
198MODULE_DEVICE_TABLE(usb, dongles);
199
200/* Send control message to set dongle register */
201static int write_reg(struct stir_cb *stir, __u16 reg, __u8 value)
202{
203 struct usb_device *dev = stir->usbdev;
204
205 pr_debug("%s: write reg %d = 0x%x\n",
206 stir->netdev->name, reg, value);
207 return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
208 REQ_WRITE_SINGLE,
209 USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_DEVICE,
210 value, reg, NULL, 0,
211 CTRL_TIMEOUT);
212}
213
214/* Send control message to read multiple registers */
215static inline int read_reg(struct stir_cb *stir, __u16 reg,
216 __u8 *data, __u16 count)
217{
218 struct usb_device *dev = stir->usbdev;
219
220 return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
221 REQ_READ_REG,
222 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
223 0, reg, data, count,
224 CTRL_TIMEOUT);
225}
226
227static inline int isfir(u32 speed)
228{
229 return (speed == 4000000);
230}
231
232/*
233 * Prepare a FIR IrDA frame for transmission to the USB dongle. The
234 * FIR transmit frame is documented in the datasheet. It consists of
235 * a two byte 0x55 0xAA sequence, two little-endian length bytes, a
236 * sequence of exactly 16 XBOF bytes of 0x7E, two BOF bytes of 0x7E,
237 * then the data escaped as follows:
238 *
239 * 0x7D -> 0x7D 0x5D
240 * 0x7E -> 0x7D 0x5E
241 * 0x7F -> 0x7D 0x5F
242 *
243 * Then, 4 bytes of little endian (stuffed) FCS follow, then two
244 * trailing EOF bytes of 0x7E.
245 */
246static inline __u8 *stuff_fir(__u8 *p, __u8 c)
247{
248 switch(c) {
249 case 0x7d:
250 case 0x7e:
251 case 0x7f:
252 *p++ = 0x7d;
253 c ^= IRDA_TRANS;
254 /* fall through */
255 default:
256 *p++ = c;
257 }
258 return p;
259}
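/*
 * Editor's note (illustrative): only the three reserved bytes are expanded,
 * e.g. 0x41 -> 0x41, but 0x7e -> 0x7d 0x5e (0x7e xor IRDA_TRANS, i.e. 0x20).
 * A frame can therefore at most double in size, on top of the fixed header,
 * XBOF/BOF and EOF overhead added by wrap_fir_skb() below.
 */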
260
261/* Take raw data in skb and put it wrapped into buf */
262static unsigned wrap_fir_skb(const struct sk_buff *skb, __u8 *buf)
263{
264 __u8 *ptr = buf;
265 __u32 fcs = ~(crc32_le(~0, skb->data, skb->len));
266 __u16 wraplen;
267 int i;
268
269 /* Header */
270 buf[0] = 0x55;
271 buf[1] = 0xAA;
272
273 ptr = buf + STIR_IRDA_HEADER;
274 memset(ptr, 0x7f, 16);
275 ptr += 16;
276
277 /* BOF */
278 *ptr++ = 0x7e;
279 *ptr++ = 0x7e;
280
281 /* Address / Control / Information */
282 for (i = 0; i < skb->len; i++)
283 ptr = stuff_fir(ptr, skb->data[i]);
284
285 /* FCS */
286 ptr = stuff_fir(ptr, fcs & 0xff);
287 ptr = stuff_fir(ptr, (fcs >> 8) & 0xff);
288 ptr = stuff_fir(ptr, (fcs >> 16) & 0xff);
289 ptr = stuff_fir(ptr, (fcs >> 24) & 0xff);
290
291 /* EOFs */
292 *ptr++ = 0x7e;
293 *ptr++ = 0x7e;
294
295 /* Total length, minus the header */
296 wraplen = (ptr - buf) - STIR_IRDA_HEADER;
297 buf[2] = wraplen & 0xff;
298 buf[3] = (wraplen >> 8) & 0xff;
299
300 return wraplen + STIR_IRDA_HEADER;
301}
302
303static unsigned wrap_sir_skb(struct sk_buff *skb, __u8 *buf)
304{
305 __u16 wraplen;
306
307 wraplen = async_wrap_skb(skb, buf + STIR_IRDA_HEADER,
308 STIR_FIFO_SIZE - STIR_IRDA_HEADER);
309 buf[0] = 0x55;
310 buf[1] = 0xAA;
311 buf[2] = wraplen & 0xff;
312 buf[3] = (wraplen >> 8) & 0xff;
313
314 return wraplen + STIR_IRDA_HEADER;
315}
316
317/*
318 * Frame is fully formed in the rx_buff so check crc
319 * and pass up to irlap
320 * setup for next receive
321 */
322static void fir_eof(struct stir_cb *stir)
323{
324 iobuff_t *rx_buff = &stir->rx_buff;
325 int len = rx_buff->len - 4;
326 struct sk_buff *skb, *nskb;
327 __u32 fcs;
328
329 if (unlikely(len <= 0)) {
330 pr_debug("%s: short frame len %d\n",
331 stir->netdev->name, len);
332
333 ++stir->stats.rx_errors;
334 ++stir->stats.rx_length_errors;
335 return;
336 }
337
338 fcs = ~(crc32_le(~0, rx_buff->data, len));
339 if (fcs != le32_to_cpu(get_unaligned((u32 *)(rx_buff->data+len)))) {
340 pr_debug("crc error calc 0x%x len %d\n", fcs, len);
341 stir->stats.rx_errors++;
342 stir->stats.rx_crc_errors++;
343 return;
344 }
345
346 /* if frame is short then just copy it */
347 if (len < IRDA_RX_COPY_THRESHOLD) {
348 nskb = dev_alloc_skb(len + 1);
349 if (unlikely(!nskb)) {
350 ++stir->stats.rx_dropped;
351 return;
352 }
353 skb_reserve(nskb, 1);
354 skb = nskb;
355 memcpy(nskb->data, rx_buff->data, len);
356 } else {
357 nskb = dev_alloc_skb(rx_buff->truesize);
358 if (unlikely(!nskb)) {
359 ++stir->stats.rx_dropped;
360 return;
361 }
362 skb_reserve(nskb, 1);
363 skb = rx_buff->skb;
364 rx_buff->skb = nskb;
365 rx_buff->head = nskb->data;
366 }
367
368 skb_put(skb, len);
369
370 skb->mac.raw = skb->data;
371 skb->protocol = htons(ETH_P_IRDA);
372 skb->dev = stir->netdev;
373
374 netif_rx(skb);
375
376 stir->stats.rx_packets++;
377 stir->stats.rx_bytes += len;
378
379 rx_buff->data = rx_buff->head;
380 rx_buff->len = 0;
381}
382
383/* Unwrap FIR stuffed data and bump it to IrLAP */
384static void stir_fir_chars(struct stir_cb *stir,
385 const __u8 *bytes, int len)
386{
387 iobuff_t *rx_buff = &stir->rx_buff;
388 int i;
389
390 for (i = 0; i < len; i++) {
391 __u8 byte = bytes[i];
392
393 switch(rx_buff->state) {
394 case OUTSIDE_FRAME:
395 /* ignore garbage till start of frame */
396 if (unlikely(byte != FIR_EOF))
397 continue;
398 /* Now receiving frame */
399 rx_buff->state = BEGIN_FRAME;
400
401 /* Time to initialize receive buffer */
402 rx_buff->data = rx_buff->head;
403 rx_buff->len = 0;
404 continue;
405
406 case LINK_ESCAPE:
407 if (byte == FIR_EOF) {
408 pr_debug("%s: got EOF after escape\n",
409 stir->netdev->name);
410 goto frame_error;
411 }
412 rx_buff->state = INSIDE_FRAME;
413 byte ^= IRDA_TRANS;
414 break;
415
416 case BEGIN_FRAME:
417 /* ignore multiple BOF/EOF */
418 if (byte == FIR_EOF)
419 continue;
420 rx_buff->state = INSIDE_FRAME;
421 rx_buff->in_frame = TRUE;
422
423 /* fall through */
424 case INSIDE_FRAME:
425 switch(byte) {
426 case FIR_CE:
427 rx_buff->state = LINK_ESCAPE;
428 continue;
429 case FIR_XBOF:
430 /* 0x7f is not used in this framing */
431 pr_debug("%s: got XBOF without escape\n",
432 stir->netdev->name);
433 goto frame_error;
434 case FIR_EOF:
435 rx_buff->state = OUTSIDE_FRAME;
436 rx_buff->in_frame = FALSE;
437 fir_eof(stir);
438 continue;
439 }
440 break;
441 }
442
443 /* add byte to rx buffer */
444 if (unlikely(rx_buff->len >= rx_buff->truesize)) {
445 pr_debug("%s: fir frame exceeds %d\n",
446 stir->netdev->name, rx_buff->truesize);
447 ++stir->stats.rx_over_errors;
448 goto error_recovery;
449 }
450
451 rx_buff->data[rx_buff->len++] = byte;
452 continue;
453
454 frame_error:
455 ++stir->stats.rx_frame_errors;
456
457 error_recovery:
458 ++stir->stats.rx_errors;
459 rx_buff->state = OUTSIDE_FRAME;
460 rx_buff->in_frame = FALSE;
461 }
462}
463
464/* Unwrap SIR stuffed data and bump it up to IrLAP */
465static void stir_sir_chars(struct stir_cb *stir,
466 const __u8 *bytes, int len)
467{
468 int i;
469
470 for (i = 0; i < len; i++)
471 async_unwrap_char(stir->netdev, &stir->stats,
472 &stir->rx_buff, bytes[i]);
473}
474
475static inline void unwrap_chars(struct stir_cb *stir,
476 const __u8 *bytes, int length)
477{
478 if (isfir(stir->speed))
479 stir_fir_chars(stir, bytes, length);
480 else
481 stir_sir_chars(stir, bytes, length);
482}
483
484/* Mode parameters for each speed */
485static const struct {
486 unsigned speed;
487 __u8 pdclk;
488} stir_modes[] = {
489 { 2400, PDCLK_2400 },
490 { 9600, PDCLK_9600 },
491 { 19200, PDCLK_19200 },
492 { 38400, PDCLK_38400 },
493 { 57600, PDCLK_57600 },
494 { 115200, PDCLK_115200 },
495 { 4000000, PDCLK_4000000 },
496};
497
498
499/*
500 * Setup chip for speed.
501 * Called at startup to initialize the chip
502 * and on speed changes.
503 *
504 * Note: Write multiple registers doesn't appear to work
505 */
506static int change_speed(struct stir_cb *stir, unsigned speed)
507{
508 int i, err;
509 __u8 mode;
510
511 for (i = 0; i < ARRAY_SIZE(stir_modes); ++i) {
512 if (speed == stir_modes[i].speed)
513 goto found;
514 }
515
516 warn("%s: invalid speed %d", stir->netdev->name, speed);
517 return -EINVAL;
518
519 found:
520 pr_debug("speed change from %d to %d\n", stir->speed, speed);
521
522 /* Reset modulator */
523 err = write_reg(stir, REG_CTRL1, CTRL1_SRESET);
524 if (err)
525 goto out;
526
527 /* Undocumented magic to tweak the DPLL */
528 err = write_reg(stir, REG_DPLL, 0x15);
529 if (err)
530 goto out;
531
532 /* Set clock */
533 err = write_reg(stir, REG_PDCLK, stir_modes[i].pdclk);
534 if (err)
535 goto out;
536
537 mode = MODE_NRESET | MODE_FASTRX;
538 if (isfir(speed))
539 mode |= MODE_FIR | MODE_FFRSTEN;
540 else
541 mode |= MODE_SIR;
542
543 if (speed == 2400)
544 mode |= MODE_2400;
545
546 err = write_reg(stir, REG_MODE, mode);
547 if (err)
548 goto out;
549
550 /* This resets TEMIC style transceiver if any. */
551 err = write_reg(stir, REG_CTRL1,
552 CTRL1_SDMODE | (tx_power & 3) << 1);
553 if (err)
554 goto out;
555
556 err = write_reg(stir, REG_CTRL1, (tx_power & 3) << 1);
557 if (err)
558 goto out;
559
560 /* Reset sensitivity */
561 err = write_reg(stir, REG_CTRL2, (rx_sensitivity & 7) << 5);
562 out:
563 stir->speed = speed;
564 return err;
565}
566
567/*
568 * Called from net/core when new frame is available.
569 */
570static int stir_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
571{
572 struct stir_cb *stir = netdev_priv(netdev);
573
574 netif_stop_queue(netdev);
575
576 /* the IRDA wrapping routines don't deal with non linear skb */
577 SKB_LINEAR_ASSERT(skb);
578
579 skb = xchg(&stir->tx_pending, skb);
580 wake_up(&stir->thr_wait);
581
 582 /* this should never happen unless there is a stop/wakeup problem */
583 if (unlikely(skb)) {
584 WARN_ON(1);
585 dev_kfree_skb(skb);
586 }
587
588 return 0;
589}
590
591/*
592 * Wait for the transmit FIFO to have space for next data
593 *
594 * If space < 0 then wait till FIFO completely drains.
 595 * FYI: can take up to 13 seconds at 2400 baud.
596 */
597static int fifo_txwait(struct stir_cb *stir, int space)
598{
599 int err;
600 unsigned long count, status;
601
602 /* Read FIFO status and count */
603 for(;;) {
604 err = read_reg(stir, REG_FIFOCTL, stir->fifo_status,
605 FIFO_REGS_SIZE);
606 if (unlikely(err != FIFO_REGS_SIZE)) {
607 warn("%s: FIFO register read error: %d",
608 stir->netdev->name, err);
609
610 return err;
611 }
612
613 status = stir->fifo_status[0];
614 count = (unsigned)(stir->fifo_status[2] & 0x1f) << 8
615 | stir->fifo_status[1];
616
617 pr_debug("fifo status 0x%lx count %lu\n", status, count);
618
619 /* error when receive/transmit fifo gets confused */
620 if (status & FIFOCTL_RXERR) {
621 stir->stats.rx_fifo_errors++;
622 stir->stats.rx_errors++;
623 break;
624 }
625
626 if (status & FIFOCTL_TXERR) {
627 stir->stats.tx_fifo_errors++;
628 stir->stats.tx_errors++;
629 break;
630 }
631
632 /* is fifo receiving already, or empty */
633 if (!(status & FIFOCTL_DIR)
634 || (status & FIFOCTL_EMPTY))
635 return 0;
636
637 if (signal_pending(current))
638 return -EINTR;
639
640 /* shutting down? */
641 if (!netif_running(stir->netdev)
642 || !netif_device_present(stir->netdev))
643 return -ESHUTDOWN;
644
645 /* only waiting for some space */
646 if (space >= 0 && STIR_FIFO_SIZE - 4 > space + count)
647 return 0;
648
649 /* estimate transfer time for remaining chars */
650 msleep((count * 8000) / stir->speed);
651 }
652
653 err = write_reg(stir, REG_FIFOCTL, FIFOCTL_CLR);
654 if (err)
655 return err;
656 err = write_reg(stir, REG_FIFOCTL, 0);
657 if (err)
658 return err;
659
660 return 0;
661}
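/*
 * Editor's note (illustrative): the msleep() estimate above assumes 8 bit
 * times per queued byte (start/stop bits ignored), i.e. count * 8000 / speed
 * milliseconds.  A full 4096-byte FIFO at 2400 baud therefore sleeps about
 * 4096 * 8000 / 2400 ~= 13650 ms, which matches the "13 seconds" comment.
 */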
662
663
664/* Wait for turnaround delay before starting transmit. */
665static void turnaround_delay(const struct stir_cb *stir, long us)
666{
667 long ticks;
668 struct timeval now;
669
670 if (us <= 0)
671 return;
672
673 do_gettimeofday(&now);
674 if (now.tv_sec - stir->rx_time.tv_sec > 0)
675 us -= USEC_PER_SEC;
676 us -= now.tv_usec - stir->rx_time.tv_usec;
677 if (us < 10)
678 return;
679
680 ticks = us / (1000000 / HZ);
681 if (ticks > 0) {
682 current->state = TASK_INTERRUPTIBLE;
683 schedule_timeout(1 + ticks);
684 } else
685 udelay(us);
686}
687
688/*
689 * Start receiver by submitting a request to the receive pipe.
690 * If nothing is available it will return after rx_interval.
691 */
692static int receive_start(struct stir_cb *stir)
693{
694 /* reset state */
695 stir->receiving = 1;
696
697 stir->rx_buff.in_frame = FALSE;
698 stir->rx_buff.state = OUTSIDE_FRAME;
699
700 stir->rx_urb->status = 0;
701 return usb_submit_urb(stir->rx_urb, GFP_KERNEL);
702}
703
704/* Stop all pending receive Urb's */
705static void receive_stop(struct stir_cb *stir)
706{
707 stir->receiving = 0;
708 usb_kill_urb(stir->rx_urb);
709
710 if (stir->rx_buff.in_frame)
711 stir->stats.collisions++;
712}
713/*
714 * Wrap data in socket buffer and send it.
715 */
716static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
717{
718 unsigned wraplen;
719 int first_frame = 0;
720
721 /* if receiving, need to turnaround */
722 if (stir->receiving) {
723 receive_stop(stir);
724 turnaround_delay(stir, irda_get_mtt(skb));
725 first_frame = 1;
726 }
727
728 if (isfir(stir->speed))
729 wraplen = wrap_fir_skb(skb, stir->io_buf);
730 else
731 wraplen = wrap_sir_skb(skb, stir->io_buf);
732
733 /* check for space available in fifo */
734 if (!first_frame)
735 fifo_txwait(stir, wraplen);
736
737 stir->stats.tx_packets++;
738 stir->stats.tx_bytes += skb->len;
739 stir->netdev->trans_start = jiffies;
740 pr_debug("send %d (%d)\n", skb->len, wraplen);
741
742 if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
743 stir->io_buf, wraplen,
744 NULL, TRANSMIT_TIMEOUT))
745 stir->stats.tx_errors++;
746}
747
748/*
749 * Transmit state machine thread
750 */
751static int stir_transmit_thread(void *arg)
752{
753 struct stir_cb *stir = arg;
754 struct net_device *dev = stir->netdev;
755 struct sk_buff *skb;
756
757 daemonize("%s", dev->name);
758 allow_signal(SIGTERM);
759
760 while (netif_running(dev)
761 && netif_device_present(dev)
762 && !signal_pending(current))
763 {
764#ifdef CONFIG_PM
765 /* if suspending, then power off and wait */
766 if (unlikely(current->flags & PF_FREEZE)) {
767 if (stir->receiving)
768 receive_stop(stir);
769 else
770 fifo_txwait(stir, -1);
771
772 write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
773
774 refrigerator(PF_FREEZE);
775
776 if (change_speed(stir, stir->speed))
777 break;
778 }
779#endif
780
781 /* if something to send? */
782 skb = xchg(&stir->tx_pending, NULL);
783 if (skb) {
784 unsigned new_speed = irda_get_next_speed(skb);
785 netif_wake_queue(dev);
786
787 if (skb->len > 0)
788 stir_send(stir, skb);
789 dev_kfree_skb(skb);
790
791 if ((new_speed != -1) && (stir->speed != new_speed)) {
792 if (fifo_txwait(stir, -1) ||
793 change_speed(stir, new_speed))
794 break;
795 }
796 continue;
797 }
798
799 /* nothing to send? start receiving */
800 if (!stir->receiving
801 && irda_device_txqueue_empty(dev)) {
 802 /* Wait, otherwise the chip gets confused. */
803 if (fifo_txwait(stir, -1))
804 break;
805
806 if (unlikely(receive_start(stir))) {
807 if (net_ratelimit())
808 info("%s: receive usb submit failed",
809 stir->netdev->name);
810 stir->receiving = 0;
811 msleep(10);
812 continue;
813 }
814 }
815
816 /* sleep if nothing to send */
817 wait_event_interruptible(stir->thr_wait, stir->tx_pending);
818 }
819
820 complete_and_exit (&stir->thr_exited, 0);
821}
822
823
824/*
825 * USB bulk receive completion callback.
826 * Wakes up every ms (usb round trip) with wrapped
827 * data.
828 */
829static void stir_rcv_irq(struct urb *urb, struct pt_regs *regs)
830{
831 struct stir_cb *stir = urb->context;
832 int err;
833
834 /* in process of stopping, just drop data */
835 if (!netif_running(stir->netdev))
836 return;
837
838 /* unlink, shutdown, unplug, other nasties */
839 if (urb->status != 0)
840 return;
841
842 if (urb->actual_length > 0) {
843 pr_debug("receive %d\n", urb->actual_length);
844 unwrap_chars(stir, urb->transfer_buffer,
845 urb->actual_length);
846
847 stir->netdev->last_rx = jiffies;
848 do_gettimeofday(&stir->rx_time);
849 }
850
 851 /* kernel thread is stopping the receiver, don't resubmit */
852 if (!stir->receiving)
853 return;
854
855 /* resubmit existing urb */
856 err = usb_submit_urb(urb, GFP_ATOMIC);
857
858 /* in case of error, the kernel thread will restart us */
859 if (err) {
860 warn("%s: usb receive submit error: %d",
861 stir->netdev->name, err);
862 stir->receiving = 0;
863 wake_up(&stir->thr_wait);
864 }
865}
866
867/*
868 * Function stir_net_open (dev)
869 *
870 * Network device is taken up. Usually this is done by "ifconfig irda0 up"
871 */
872static int stir_net_open(struct net_device *netdev)
873{
874 struct stir_cb *stir = netdev_priv(netdev);
875 int err;
876 char hwname[16];
877
878 err = usb_clear_halt(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1));
879 if (err)
880 goto err_out1;
881 err = usb_clear_halt(stir->usbdev, usb_rcvbulkpipe(stir->usbdev, 2));
882 if (err)
883 goto err_out1;
884
885 err = change_speed(stir, 9600);
886 if (err)
887 goto err_out1;
888
889 err = -ENOMEM;
890
891 /* Initialize for SIR/FIR to copy data directly into skb. */
892 stir->receiving = 0;
893 stir->rx_buff.truesize = IRDA_SKB_MAX_MTU;
894 stir->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
895 if (!stir->rx_buff.skb)
896 goto err_out1;
897
898 skb_reserve(stir->rx_buff.skb, 1);
899 stir->rx_buff.head = stir->rx_buff.skb->data;
900 do_gettimeofday(&stir->rx_time);
901
902 stir->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
903 if (!stir->rx_urb)
904 goto err_out2;
905
906 stir->io_buf = kmalloc(STIR_FIFO_SIZE, GFP_KERNEL);
907 if (!stir->io_buf)
908 goto err_out3;
909
910 usb_fill_bulk_urb(stir->rx_urb, stir->usbdev,
911 usb_rcvbulkpipe(stir->usbdev, 2),
912 stir->io_buf, STIR_FIFO_SIZE,
913 stir_rcv_irq, stir);
914
915 stir->fifo_status = kmalloc(FIFO_REGS_SIZE, GFP_KERNEL);
916 if (!stir->fifo_status)
917 goto err_out4;
918
919 /*
920 * Now that everything should be initialized properly,
921 * Open new IrLAP layer instance to take care of us...
922 * Note : will send immediately a speed change...
923 */
924 sprintf(hwname, "usb#%d", stir->usbdev->devnum);
925 stir->irlap = irlap_open(netdev, &stir->qos, hwname);
926 if (!stir->irlap) {
927 err("stir4200: irlap_open failed");
928 goto err_out5;
929 }
930
 931 /* Start kernel thread for transmit. */
932 stir->thr_pid = kernel_thread(stir_transmit_thread, stir,
933 CLONE_FS|CLONE_FILES);
934 if (stir->thr_pid < 0) {
935 err = stir->thr_pid;
936 err("stir4200: unable to start kernel thread");
937 goto err_out6;
938 }
939
940 netif_start_queue(netdev);
941
942 return 0;
943
944 err_out6:
945 irlap_close(stir->irlap);
946 err_out5:
947 kfree(stir->fifo_status);
948 err_out4:
949 kfree(stir->io_buf);
950 err_out3:
951 usb_free_urb(stir->rx_urb);
952 err_out2:
953 kfree_skb(stir->rx_buff.skb);
954 err_out1:
955 return err;
956}
957
958/*
959 * Function stir_net_close (stir)
960 *
961 * Network device is taken down. Usually this is done by
962 * "ifconfig irda0 down"
963 */
964static int stir_net_close(struct net_device *netdev)
965{
966 struct stir_cb *stir = netdev_priv(netdev);
967
968 /* Stop transmit processing */
969 netif_stop_queue(netdev);
970
971 /* Kill transmit thread */
972 kill_proc(stir->thr_pid, SIGTERM, 1);
973 wait_for_completion(&stir->thr_exited);
974 kfree(stir->fifo_status);
975
976 /* Mop up receive urb's */
977 usb_kill_urb(stir->rx_urb);
978
979 kfree(stir->io_buf);
980 usb_free_urb(stir->rx_urb);
981 kfree_skb(stir->rx_buff.skb);
982
983 /* Stop and remove instance of IrLAP */
984 if (stir->irlap)
985 irlap_close(stir->irlap);
986
987 stir->irlap = NULL;
988
989 return 0;
990}
991
992/*
993 * IOCTLs : Extra out-of-band network commands...
994 */
995static int stir_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
996{
997 struct if_irda_req *irq = (struct if_irda_req *) rq;
998 struct stir_cb *stir = netdev_priv(netdev);
999 int ret = 0;
1000
1001 switch (cmd) {
1002 case SIOCSBANDWIDTH: /* Set bandwidth */
1003 if (!capable(CAP_NET_ADMIN))
1004 return -EPERM;
1005
1006 /* Check if the device is still there */
1007 if (netif_device_present(stir->netdev))
1008 ret = change_speed(stir, irq->ifr_baudrate);
1009 break;
1010
1011 case SIOCSMEDIABUSY: /* Set media busy */
1012 if (!capable(CAP_NET_ADMIN))
1013 return -EPERM;
1014
1015 /* Check if the IrDA stack is still there */
1016 if (netif_running(stir->netdev))
1017 irda_device_set_media_busy(stir->netdev, TRUE);
1018 break;
1019
1020 case SIOCGRECEIVING:
1021 /* Only approximately true */
1022 irq->ifr_receiving = stir->receiving;
1023 break;
1024
1025 default:
1026 ret = -EOPNOTSUPP;
1027 }
1028
1029 return ret;
1030}
1031
1032/*
1033 * Get device stats (for /proc/net/dev and ifconfig)
1034 */
1035static struct net_device_stats *stir_net_get_stats(struct net_device *netdev)
1036{
1037 struct stir_cb *stir = netdev_priv(netdev);
1038 return &stir->stats;
1039}
1040
1041/*
1042 * This routine is called by the USB subsystem for each new device
1043 * in the system. We need to check if the device is ours, and in
1044 * this case start handling it.
1045 * Note : it might be worth protecting this function by a global
 1046 * spinlock... Or not, because maybe USB already deals with that...
1047 */
1048static int stir_probe(struct usb_interface *intf,
1049 const struct usb_device_id *id)
1050{
1051 struct usb_device *dev = interface_to_usbdev(intf);
1052 struct stir_cb *stir = NULL;
1053 struct net_device *net;
1054 int ret = -ENOMEM;
1055
1056 /* Allocate network device container. */
1057 net = alloc_irdadev(sizeof(*stir));
1058 if(!net)
1059 goto err_out1;
1060
1061 SET_MODULE_OWNER(net);
1062 SET_NETDEV_DEV(net, &intf->dev);
1063 stir = netdev_priv(net);
1064 stir->netdev = net;
1065 stir->usbdev = dev;
1066
1067 ret = usb_reset_configuration(dev);
1068 if (ret != 0) {
1069 err("stir4200: usb reset configuration failed");
1070 goto err_out2;
1071 }
1072
1073 printk(KERN_INFO "SigmaTel STIr4200 IRDA/USB found at address %d, "
1074 "Vendor: %x, Product: %x\n",
1075 dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
1076 le16_to_cpu(dev->descriptor.idProduct));
1077
1078 /* Initialize QoS for this device */
1079 irda_init_max_qos_capabilies(&stir->qos);
1080
1081 /* That's the Rx capability. */
1082 stir->qos.baud_rate.bits &= IR_2400 | IR_9600 | IR_19200 |
1083 IR_38400 | IR_57600 | IR_115200 |
1084 (IR_4000000 << 8);
1085 stir->qos.min_turn_time.bits &= qos_mtt_bits;
1086 irda_qos_bits_to_value(&stir->qos);
1087
1088 init_completion (&stir->thr_exited);
1089 init_waitqueue_head (&stir->thr_wait);
1090
1091 /* Override the network functions we need to use */
1092 net->hard_start_xmit = stir_hard_xmit;
1093 net->open = stir_net_open;
1094 net->stop = stir_net_close;
1095 net->get_stats = stir_net_get_stats;
1096 net->do_ioctl = stir_net_ioctl;
1097
1098 ret = register_netdev(net);
1099 if (ret != 0)
1100 goto err_out2;
1101
1102 info("IrDA: Registered SigmaTel device %s", net->name);
1103
1104 usb_set_intfdata(intf, stir);
1105
1106 return 0;
1107
1108err_out2:
1109 free_netdev(net);
1110err_out1:
1111 return ret;
1112}
1113
1114/*
 1115 * The current device is removed, the USB layer tells us to shut it down...
1116 */
1117static void stir_disconnect(struct usb_interface *intf)
1118{
1119 struct stir_cb *stir = usb_get_intfdata(intf);
1120
1121 if (!stir)
1122 return;
1123
1124 unregister_netdev(stir->netdev);
1125 free_netdev(stir->netdev);
1126
1127 usb_set_intfdata(intf, NULL);
1128}
1129
1130#ifdef CONFIG_PM
1131/* Power management suspend, so power off the transmitter/receiver */
1132static int stir_suspend(struct usb_interface *intf, u32 state)
1133{
1134 struct stir_cb *stir = usb_get_intfdata(intf);
1135
1136 netif_device_detach(stir->netdev);
1137 return 0;
1138}
1139
1140/* Coming out of suspend, so reset hardware */
1141static int stir_resume(struct usb_interface *intf)
1142{
1143 struct stir_cb *stir = usb_get_intfdata(intf);
1144
1145 netif_device_attach(stir->netdev);
1146
1147 /* receiver restarted when send thread wakes up */
1148 return 0;
1149}
1150#endif
1151
1152/*
1153 * USB device callbacks
1154 */
1155static struct usb_driver irda_driver = {
1156 .owner = THIS_MODULE,
1157 .name = "stir4200",
1158 .probe = stir_probe,
1159 .disconnect = stir_disconnect,
1160 .id_table = dongles,
1161#ifdef CONFIG_PM
1162 .suspend = stir_suspend,
1163 .resume = stir_resume,
1164#endif
1165};
1166
1167/*
1168 * Module insertion
1169 */
1170static int __init stir_init(void)
1171{
1172 return usb_register(&irda_driver);
1173}
1174module_init(stir_init);
1175
1176/*
1177 * Module removal
1178 */
1179static void __exit stir_cleanup(void)
1180{
1181 /* Deregister the driver and remove all pending instances */
1182 usb_deregister(&irda_driver);
1183}
1184module_exit(stir_cleanup);
diff --git a/drivers/net/irda/tekram-sir.c b/drivers/net/irda/tekram-sir.c
new file mode 100644
index 000000000000..0dd6bc7af3f2
--- /dev/null
+++ b/drivers/net/irda/tekram-sir.c
@@ -0,0 +1,232 @@
1/*********************************************************************
2 *
3 * Filename: tekram.c
4 * Version: 1.3
5 * Description: Implementation of the Tekram IrMate IR-210B dongle
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Wed Oct 21 20:02:35 1998
9 * Modified at: Sun Oct 27 22:02:38 2002
10 * Modified by: Martin Diehl <mad@mdiehl.de>
11 *
12 * Copyright (c) 1998-1999 Dag Brattli,
13 * Copyright (c) 2002 Martin Diehl,
14 * All Rights Reserved.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of
19 * the License, or (at your option) any later version.
20 *
 21 * Neither Dag Brattli nor University of Tromsø admit liability nor
22 * provide warranty for any of this software. This material is
23 * provided "AS-IS" and at no charge.
24 *
25 ********************************************************************/
26
27#include <linux/module.h>
28#include <linux/delay.h>
29#include <linux/init.h>
30
31#include <net/irda/irda.h>
32
33#include "sir-dev.h"
34
35static int tekram_delay = 150; /* default is 150 ms */
36module_param(tekram_delay, int, 0);
37MODULE_PARM_DESC(tekram_delay, "tekram dongle write complete delay");
38
39static int tekram_open(struct sir_dev *);
40static int tekram_close(struct sir_dev *);
41static int tekram_change_speed(struct sir_dev *, unsigned);
42static int tekram_reset(struct sir_dev *);
43
44#define TEKRAM_115200 0x00
45#define TEKRAM_57600 0x01
46#define TEKRAM_38400 0x02
47#define TEKRAM_19200 0x03
48#define TEKRAM_9600 0x04
49
50#define TEKRAM_PW 0x10 /* Pulse select bit */
51
52static struct dongle_driver tekram = {
53 .owner = THIS_MODULE,
54 .driver_name = "Tekram IR-210B",
55 .type = IRDA_TEKRAM_DONGLE,
56 .open = tekram_open,
57 .close = tekram_close,
58 .reset = tekram_reset,
59 .set_speed = tekram_change_speed,
60};
61
62static int __init tekram_sir_init(void)
63{
64 if (tekram_delay < 1 || tekram_delay > 500)
65 tekram_delay = 200;
66 IRDA_DEBUG(1, "%s - using %d ms delay\n",
67 tekram.driver_name, tekram_delay);
68 return irda_register_dongle(&tekram);
69}
70
71static void __exit tekram_sir_cleanup(void)
72{
73 irda_unregister_dongle(&tekram);
74}
75
76static int tekram_open(struct sir_dev *dev)
77{
78 struct qos_info *qos = &dev->qos;
79
80 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
81
82 sirdev_set_dtr_rts(dev, TRUE, TRUE);
83
84 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
85 qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
86 irda_qos_bits_to_value(qos);
87
88 /* irda thread waits 50 msec for power settling */
89
90 return 0;
91}
92
93static int tekram_close(struct sir_dev *dev)
94{
95 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
96
97 /* Power off dongle */
98 sirdev_set_dtr_rts(dev, FALSE, FALSE);
99
100 return 0;
101}
102
103/*
104 * Function tekram_change_speed (dev, state, speed)
105 *
106 * Set the speed for the Tekram IRMate 210 type dongle. Warning, this
107 * function must be called with a process context!
108 *
109 * Algorithm
110 * 1. clear DTR
111 * 2. set RTS, and wait at least 7 us
112 * 3. send Control Byte to the IR-210 through TXD to set new baud rate
113 * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
114 * it takes about 100 msec)
115 *
116 * [oops, why 100 msec? sending 1 byte (10 bits) takes 1.05 msec
 117 * - perhaps this compensates for delays in the tty layer?]
118 *
119 * 5. clear RTS (return to NORMAL Operation)
120 * 6. wait at least 50 us, new setting (baud rate, etc) takes effect here
121 * after
122 */
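/*
 * Editor's note (illustrative): the control byte combines the pulse-select
 * bit with the rate code, e.g. 57600 uses TEKRAM_PW | TEKRAM_57600 =
 * 0x10 | 0x01 = 0x11, while 115200 uses plain TEKRAM_115200 (0x00).  At
 * 9600 baud the 10-bit control byte itself takes only about 1 ms on the
 * wire, so the default 150 ms tekram_delay presumably covers dongle-side
 * settling rather than transmission time.
 */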
123
124#define TEKRAM_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1)
125
126static int tekram_change_speed(struct sir_dev *dev, unsigned speed)
127{
128 unsigned state = dev->fsm.substate;
129 unsigned delay = 0;
130 u8 byte;
131 static int ret = 0;
132
133 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
134
135 switch(state) {
136 case SIRDEV_STATE_DONGLE_SPEED:
137
138 switch (speed) {
139 default:
140 speed = 9600;
141 ret = -EINVAL;
142 /* fall thru */
143 case 9600:
144 byte = TEKRAM_PW|TEKRAM_9600;
145 break;
146 case 19200:
147 byte = TEKRAM_PW|TEKRAM_19200;
148 break;
149 case 38400:
150 byte = TEKRAM_PW|TEKRAM_38400;
151 break;
152 case 57600:
153 byte = TEKRAM_PW|TEKRAM_57600;
154 break;
155 case 115200:
156 byte = TEKRAM_115200;
157 break;
158 }
159
160 /* Set DTR, Clear RTS */
161 sirdev_set_dtr_rts(dev, TRUE, FALSE);
162
163 /* Wait at least 7us */
164 udelay(14);
165
166 /* Write control byte */
167 sirdev_raw_write(dev, &byte, 1);
168
169 dev->speed = speed;
170
171 state = TEKRAM_STATE_WAIT_SPEED;
172 delay = tekram_delay;
173 break;
174
175 case TEKRAM_STATE_WAIT_SPEED:
176 /* Set DTR, Set RTS */
177 sirdev_set_dtr_rts(dev, TRUE, TRUE);
178 udelay(50);
179 break;
180
181 default:
182 IRDA_ERROR("%s - undefined state %d\n", __FUNCTION__, state);
183 ret = -EINVAL;
184 break;
185 }
186
187 dev->fsm.substate = state;
188 return (delay > 0) ? delay : ret;
189}
190
191/*
192 * Function tekram_reset (driver)
193 *
194 * This function resets the tekram dongle. Warning, this function
195 * must be called with a process context!!
196 *
197 * Algorithm:
198 * 0. Clear RTS and DTR, and wait 50 ms (power off the IR-210 )
199 * 1. clear RTS
200 * 2. set DTR, and wait at least 1 ms
201 * 3. clear DTR to SPACE state, wait at least 50 us for further
202 * operation
203 */
204
205static int tekram_reset(struct sir_dev *dev)
206{
207 IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
208
209 /* Clear DTR, Set RTS */
210 sirdev_set_dtr_rts(dev, FALSE, TRUE);
211
212 /* Should sleep 1 ms */
213 msleep(1);
214
215 /* Set DTR, Set RTS */
216 sirdev_set_dtr_rts(dev, TRUE, TRUE);
217
218 /* Wait at least 50 us */
219 udelay(75);
220
221 dev->speed = 9600;
222
223 return 0;
224}
225
226MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
227MODULE_DESCRIPTION("Tekram IrMate IR-210B dongle driver");
228MODULE_LICENSE("GPL");
229MODULE_ALIAS("irda-dongle-0"); /* IRDA_TEKRAM_DONGLE */
230
231module_init(tekram_sir_init);
232module_exit(tekram_sir_cleanup);
diff --git a/drivers/net/irda/tekram.c b/drivers/net/irda/tekram.c
new file mode 100644
index 000000000000..8f6258221cb0
--- /dev/null
+++ b/drivers/net/irda/tekram.c
@@ -0,0 +1,282 @@
1/*********************************************************************
2 *
3 * Filename: tekram.c
4 * Version: 1.2
5 * Description: Implementation of the Tekram IrMate IR-210B dongle
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Wed Oct 21 20:02:35 1998
9 * Modified at: Fri Dec 17 09:13:09 1999
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-1999 Dag Brattli, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
 19 * Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge.
22 *
23 ********************************************************************/
24
25#include <linux/module.h>
26#include <linux/delay.h>
27#include <linux/tty.h>
28#include <linux/init.h>
29
30#include <net/irda/irda.h>
31#include <net/irda/irda_device.h>
32
33static void tekram_open(dongle_t *self, struct qos_info *qos);
34static void tekram_close(dongle_t *self);
35static int tekram_change_speed(struct irda_task *task);
36static int tekram_reset(struct irda_task *task);
37
38#define TEKRAM_115200 0x00
39#define TEKRAM_57600 0x01
40#define TEKRAM_38400 0x02
41#define TEKRAM_19200 0x03
42#define TEKRAM_9600 0x04
43
44#define TEKRAM_PW 0x10 /* Pulse select bit */
45
46static struct dongle_reg dongle = {
47 .type = IRDA_TEKRAM_DONGLE,
48 .open = tekram_open,
49 .close = tekram_close,
50 .reset = tekram_reset,
51 .change_speed = tekram_change_speed,
52 .owner = THIS_MODULE,
53};
54
55static int __init tekram_init(void)
56{
57 return irda_device_register_dongle(&dongle);
58}
59
60static void __exit tekram_cleanup(void)
61{
62 irda_device_unregister_dongle(&dongle);
63}
64
65static void tekram_open(dongle_t *self, struct qos_info *qos)
66{
67 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
68
69 qos->baud_rate.bits &= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
70 qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */
71 irda_qos_bits_to_value(qos);
72}
73
74static void tekram_close(dongle_t *self)
75{
76 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
77
78 /* Power off dongle */
79 self->set_dtr_rts(self->dev, FALSE, FALSE);
80
81 if (self->reset_task)
82 irda_task_delete(self->reset_task);
83 if (self->speed_task)
84 irda_task_delete(self->speed_task);
85}
86
87/*
88 * Function tekram_change_speed (dev, state, speed)
89 *
90 * Set the speed for the Tekram IRMate 210 type dongle. Warning, this
91 * function must be called with a process context!
92 *
93 * Algorithm
94 * 1. clear DTR
95 * 2. set RTS, and wait at least 7 us
96 * 3. send Control Byte to the IR-210 through TXD to set new baud rate
97 * wait until the stop bit of Control Byte is sent (for 9600 baud rate,
98 * it takes about 100 msec)
99 * 5. clear RTS (return to NORMAL Operation)
100 * 6. wait at least 50 us, new setting (baud rate, etc) takes effect here
101 * after
102 */
103static int tekram_change_speed(struct irda_task *task)
104{
105 dongle_t *self = (dongle_t *) task->instance;
106 __u32 speed = (__u32) task->param;
107 __u8 byte;
108 int ret = 0;
109
110 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
111
112 IRDA_ASSERT(task != NULL, return -1;);
113
114 if (self->speed_task && self->speed_task != task) {
115 IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__ );
116 return msecs_to_jiffies(10);
117 } else
118 self->speed_task = task;
119
120 switch (speed) {
121 default:
122 case 9600:
123 byte = TEKRAM_PW|TEKRAM_9600;
124 break;
125 case 19200:
126 byte = TEKRAM_PW|TEKRAM_19200;
127 break;
128 case 38400:
129 byte = TEKRAM_PW|TEKRAM_38400;
130 break;
131 case 57600:
132 byte = TEKRAM_PW|TEKRAM_57600;
133 break;
134 case 115200:
135 byte = TEKRAM_115200;
136 break;
137 }
138
139 switch (task->state) {
140 case IRDA_TASK_INIT:
141 case IRDA_TASK_CHILD_INIT:
142 /*
143 * Need to reset the dongle and go to 9600 bps before
144 * programming
145 */
146 if (irda_task_execute(self, tekram_reset, NULL, task,
147 (void *) speed))
148 {
149 /* Dongle need more time to reset */
150 irda_task_next_state(task, IRDA_TASK_CHILD_WAIT);
151
152 /* Give reset 1 sec to finish */
153 ret = msecs_to_jiffies(1000);
154 } else
155 irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
156 break;
157 case IRDA_TASK_CHILD_WAIT:
158 IRDA_WARNING("%s(), resetting dongle timed out!\n",
159 __FUNCTION__);
160 ret = -1;
161 break;
162 case IRDA_TASK_CHILD_DONE:
163 /* Set DTR, Clear RTS */
164 self->set_dtr_rts(self->dev, TRUE, FALSE);
165
166 /* Wait at least 7us */
167 udelay(14);
168
169 /* Write control byte */
170 self->write(self->dev, &byte, 1);
171
172 irda_task_next_state(task, IRDA_TASK_WAIT);
173
174 /* Wait at least 100 ms */
175 ret = msecs_to_jiffies(150);
176 break;
177 case IRDA_TASK_WAIT:
178 /* Set DTR, Set RTS */
179 self->set_dtr_rts(self->dev, TRUE, TRUE);
180
181 irda_task_next_state(task, IRDA_TASK_DONE);
182 self->speed_task = NULL;
183 break;
184 default:
185 IRDA_ERROR("%s(), unknown state %d\n",
186 __FUNCTION__, task->state);
187 irda_task_next_state(task, IRDA_TASK_DONE);
188 self->speed_task = NULL;
189 ret = -1;
190 break;
191 }
192 return ret;
193}
194
195/*
196 * Function tekram_reset (driver)
197 *
198 * This function resets the tekram dongle. Warning, this function
199 * must be called with a process context!!
200 *
201 * Algorithm:
202 * 0. Clear RTS and DTR, and wait 50 ms (power off the IR-210 )
203 * 1. clear RTS
204 * 2. set DTR, and wait at least 1 ms
205 * 3. clear DTR to SPACE state, wait at least 50 us for further
206 * operation
207 */
208int tekram_reset(struct irda_task *task)
209{
210 dongle_t *self = (dongle_t *) task->instance;
211 int ret = 0;
212
213 IRDA_DEBUG(2, "%s()\n", __FUNCTION__ );
214
215 IRDA_ASSERT(task != NULL, return -1;);
216
217 if (self->reset_task && self->reset_task != task) {
218 IRDA_DEBUG(0, "%s(), busy!\n", __FUNCTION__ );
219 return msecs_to_jiffies(10);
220 } else
221 self->reset_task = task;
222
223 /* Power off dongle */
224 //self->set_dtr_rts(self->dev, FALSE, FALSE);
225 self->set_dtr_rts(self->dev, TRUE, TRUE);
226
227 switch (task->state) {
228 case IRDA_TASK_INIT:
229 irda_task_next_state(task, IRDA_TASK_WAIT1);
230
231 /* Sleep 50 ms */
232 ret = msecs_to_jiffies(50);
233 break;
234 case IRDA_TASK_WAIT1:
235 /* Clear DTR, Set RTS */
236 self->set_dtr_rts(self->dev, FALSE, TRUE);
237
238 irda_task_next_state(task, IRDA_TASK_WAIT2);
239
240 /* Should sleep 1 ms */
241 ret = msecs_to_jiffies(1);
242 break;
243 case IRDA_TASK_WAIT2:
244 /* Set DTR, Set RTS */
245 self->set_dtr_rts(self->dev, TRUE, TRUE);
246
247 /* Wait at least 50 us */
248 udelay(75);
249
250 irda_task_next_state(task, IRDA_TASK_DONE);
251 self->reset_task = NULL;
252 break;
253 default:
254 IRDA_ERROR("%s(), unknown state %d\n",
255 __FUNCTION__, task->state);
256 irda_task_next_state(task, IRDA_TASK_DONE);
257 self->reset_task = NULL;
258 ret = -1;
259 }
260 return ret;
261}
262
263MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
264MODULE_DESCRIPTION("Tekram IrMate IR-210B dongle driver");
265MODULE_LICENSE("GPL");
266MODULE_ALIAS("irda-dongle-0"); /* IRDA_TEKRAM_DONGLE */
267
268/*
269 * Function init_module (void)
270 *
271 * Initialize Tekram module
272 *
273 */
274module_init(tekram_init);
275
276/*
277 * Function cleanup_module (void)
278 *
279 * Cleanup Tekram module
280 *
281 */
282module_exit(tekram_cleanup);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
new file mode 100644
index 000000000000..8bafb455c102
--- /dev/null
+++ b/drivers/net/irda/via-ircc.c
@@ -0,0 +1,1676 @@
1/********************************************************************
2 Filename: via-ircc.c
3 Version: 1.0
4 Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
5 Author: VIA Technologies,inc
6 Date : 08/06/2003
7
8Copyright (c) 1998-2003 VIA Technologies, Inc.
9
10This program is free software; you can redistribute it and/or modify it under
11the terms of the GNU General Public License as published by the Free Software
12Foundation; either version 2, or (at your option) any later version.
13
14This program is distributed in the hope that it will be useful, but WITHOUT
15ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
16MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17See the GNU General Public License for more details.
18
19You should have received a copy of the GNU General Public License along with
20this program; if not, write to the Free Software Foundation, Inc.,
2159 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22
23F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
24F02 Oct/28/02: Add SB device ID for 3147 and 3177.
25 Comment :
26 jul/09/2002 : only implement two kind of dongle currently.
27 Oct/02/2002 : work on VT8231 and VT8233 .
28 Aug/06/2003 : change driver format to pci driver .
29
302004-02-16: <sda@bdit.de>
31- Removed unneeded 'legacy' pci stuff.
 32- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
33- On speed change from core, don't send SIR frame with new speed.
34 Use current speed and change speeds later.
35- Make module-param dongle_id actually work.
36- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
37 Tested with home-grown PCB on EPIA boards.
38- Code cleanup.
39
40 ********************************************************************/
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/types.h>
44#include <linux/skbuff.h>
45#include <linux/netdevice.h>
46#include <linux/ioport.h>
47#include <linux/delay.h>
48#include <linux/slab.h>
49#include <linux/init.h>
50#include <linux/rtnetlink.h>
51#include <linux/pci.h>
52#include <linux/dma-mapping.h>
53
54#include <asm/io.h>
55#include <asm/dma.h>
56#include <asm/byteorder.h>
57
58#include <linux/pm.h>
59
60#include <net/irda/wrapper.h>
61#include <net/irda/irda.h>
62#include <net/irda/irda_device.h>
63
64#include "via-ircc.h"
65
66#define VIA_MODULE_NAME "via-ircc"
67#define CHIP_IO_EXTENT 0x40
68
69static char *driver_name = VIA_MODULE_NAME;
70
71/* Module parameters */
72static int qos_mtt_bits = 0x07; /* 1 ms or more */
73static int dongle_id = 0; /* default: probe */
74
75/* We can't guess the type of connected dongle, user *must* supply it. */
76module_param(dongle_id, int, 0);
77
78/* FIXME : we should not need this, because instances should be automatically
79 * managed by the PCI layer. Especially that we seem to only be using the
80 * first entry. Jean II */
81/* Max 4 instances for now */
82static struct via_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL };
83
84/* Some prototypes */
85static int via_ircc_open(int i, chipio_t * info, unsigned int id);
86static int via_ircc_close(struct via_ircc_cb *self);
87static int via_ircc_dma_receive(struct via_ircc_cb *self);
88static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
89 int iobase);
90static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
91 struct net_device *dev);
92static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
93 struct net_device *dev);
94static void via_hw_init(struct via_ircc_cb *self);
95static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
96static irqreturn_t via_ircc_interrupt(int irq, void *dev_id,
97 struct pt_regs *regs);
98static int via_ircc_is_receiving(struct via_ircc_cb *self);
99static int via_ircc_read_dongle_id(int iobase);
100
101static int via_ircc_net_open(struct net_device *dev);
102static int via_ircc_net_close(struct net_device *dev);
103static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
104 int cmd);
105static struct net_device_stats *via_ircc_net_get_stats(struct net_device
106 *dev);
107static void via_ircc_change_dongle_speed(int iobase, int speed,
108 int dongle_id);
109static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
110static void hwreset(struct via_ircc_cb *self);
111static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
112static int upload_rxdata(struct via_ircc_cb *self, int iobase);
113static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id);
114static void __devexit via_remove_one (struct pci_dev *pdev);
115
116/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
117static void iodelay(int udelay)
118{
119 u8 data;
120 int i;
121
122 for (i = 0; i < udelay; i++) {
123 data = inb(0x80);
124 }
125}
126
127static struct pci_device_id via_pci_tbl[] = {
128 { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
129 { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
130 { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
131 { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
132 { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
133 { 0, }
134};
135
136MODULE_DEVICE_TABLE(pci,via_pci_tbl);
137
138
139static struct pci_driver via_driver = {
140 .name = VIA_MODULE_NAME,
141 .id_table = via_pci_tbl,
142 .probe = via_init_one,
143 .remove = __devexit_p(via_remove_one),
144};
145
146
147/*
148 * Function via_ircc_init ()
149 *
150 * Initialize chip. Just find out chip type and resource.
151 */
152static int __init via_ircc_init(void)
153{
154 int rc;
155
156 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
157
158 rc = pci_register_driver(&via_driver);
159 if (rc < 0) {
160 IRDA_DEBUG(0, "%s(): error rc = %d, returning -ENODEV...\n",
161 __FUNCTION__, rc);
162 return -ENODEV;
163 }
164 return 0;
165}
166
167static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id)
168{
169 int rc;
170 u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
171 u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
172 chipio_t info;
173
174 IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __FUNCTION__, id->device);
175
176 rc = pci_enable_device (pcidev);
177 if (rc) {
178 IRDA_DEBUG(0, "%s(): error rc = %d\n", __FUNCTION__, rc);
179 return -ENODEV;
180 }
181
 182 // South Bridge exists
183 if ( ReadLPCReg(0x20) != 0x3C )
184 Chipset=0x3096;
185 else
186 Chipset=0x3076;
187
188 if (Chipset==0x3076) {
189 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __FUNCTION__);
190
191 WriteLPCReg(7,0x0c );
192 temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
193 if((temp&0x01)==1) { // BIOS close or no FIR
194 WriteLPCReg(0x1d, 0x82 );
195 WriteLPCReg(0x23,0x18);
196 temp=ReadLPCReg(0xF0);
197 if((temp&0x01)==0) {
198 temp=(ReadLPCReg(0x74)&0x03); //DMA
199 FirDRQ0=temp + 4;
200 temp=(ReadLPCReg(0x74)&0x0C) >> 2;
201 FirDRQ1=temp + 4;
202 } else {
203 temp=(ReadLPCReg(0x74)&0x0C) >> 2; //DMA
204 FirDRQ0=temp + 4;
205 FirDRQ1=FirDRQ0;
206 }
207 FirIRQ=(ReadLPCReg(0x70)&0x0f); //IRQ
208 FirIOBase=ReadLPCReg(0x60 ) << 8; //IO Space :high byte
209 FirIOBase=FirIOBase| ReadLPCReg(0x61) ; //low byte
210 FirIOBase=FirIOBase ;
211 info.fir_base=FirIOBase;
212 info.irq=FirIRQ;
213 info.dma=FirDRQ1;
214 info.dma2=FirDRQ0;
215 pci_read_config_byte(pcidev,0x40,&bTmp);
216 pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
217 pci_read_config_byte(pcidev,0x42,&bTmp);
218 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
219 pci_write_config_byte(pcidev,0x5a,0xc0);
220 WriteLPCReg(0x28, 0x70 );
221 if (via_ircc_open(0, &info,0x3076) == 0)
222 rc=0;
223 } else
 224 rc = -ENODEV; // IR not turned on
225 } else { //Not VT1211
226 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __FUNCTION__);
227
228 pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
229 if((bTmp&0x01)==1) { // BIOS enable FIR
230 //Enable Double DMA clock
231 pci_read_config_byte(pcidev,0x42,&oldPCI_40);
232 pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
233 pci_read_config_byte(pcidev,0x40,&oldPCI_40);
234 pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
235 pci_read_config_byte(pcidev,0x44,&oldPCI_44);
236 pci_write_config_byte(pcidev,0x44,0x4e);
237 //---------- read configuration from Function0 of south bridge
238 if((bTmp&0x02)==0) {
239 pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
240 FirDRQ0 = (bTmp1 & 0x30) >> 4;
241 pci_read_config_byte(pcidev,0x44,&bTmp1);
242 FirDRQ1 = (bTmp1 & 0xc0) >> 6;
243 } else {
244 pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
245 FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
246 FirDRQ1=0;
247 }
248 pci_read_config_byte(pcidev,0x47,&bTmp1); //IRQ
249 FirIRQ = bTmp1 & 0x0f;
250
251 pci_read_config_byte(pcidev,0x69,&bTmp);
 252 FirIOBase = bTmp << 8; // high byte
253 pci_read_config_byte(pcidev,0x68,&bTmp);
254 FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
255 //-------------------------
256 info.fir_base=FirIOBase;
257 info.irq=FirIRQ;
258 info.dma=FirDRQ1;
259 info.dma2=FirDRQ0;
260 if (via_ircc_open(0, &info,0x3096) == 0)
261 rc=0;
262 } else
 263 rc = -ENODEV; // IR not turned on
264 }//Not VT1211
265
266 IRDA_DEBUG(2, "%s(): End - rc = %d\n", __FUNCTION__, rc);
267 return rc;
268}
269
270/*
271 * Function via_ircc_clean ()
272 *
273 * Close all configured chips
274 *
275 */
276static void via_ircc_clean(void)
277{
278 int i;
279
280 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
281
282 for (i=0; i < 4; i++) {
283 if (dev_self[i])
284 via_ircc_close(dev_self[i]);
285 }
286}
287
288static void __devexit via_remove_one (struct pci_dev *pdev)
289{
290 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
291
292 /* FIXME : This is ugly. We should use pci_get_drvdata(pdev);
293 * to get our driver instance and call directly via_ircc_close().
294 * See vlsi_ir for details...
295 * Jean II */
296 via_ircc_clean();
297
298 /* FIXME : This should be in via_ircc_close(), because here we may
 299 * theoretically disable still-configured devices :-( - Jean II */
300 pci_disable_device(pdev);
301}
302
303static void __exit via_ircc_cleanup(void)
304{
305 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
306
307 /* FIXME : This should be redundant, as pci_unregister_driver()
308 * should call via_remove_one() on each device.
309 * Jean II */
310 via_ircc_clean();
311
312 /* Cleanup all instances of the driver */
313 pci_unregister_driver (&via_driver);
314}
315
316/*
317 * Function via_ircc_open (iobase, irq)
318 *
319 * Open driver instance
320 *
321 */
322static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
323{
324 struct net_device *dev;
325 struct via_ircc_cb *self;
326 int err;
327
328 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
329
330 /* Allocate new instance of the driver */
331 dev = alloc_irdadev(sizeof(struct via_ircc_cb));
332 if (dev == NULL)
333 return -ENOMEM;
334
335 self = dev->priv;
336 self->netdev = dev;
337 spin_lock_init(&self->lock);
338
339 /* FIXME : We should store our driver instance in the PCI layer,
340 * using pci_set_drvdata(), not in this array.
341 * See vlsi_ir for details... - Jean II */
342 /* FIXME : 'i' is always 0 (see via_init_one()) :-( - Jean II */
343 /* Need to store self somewhere */
344 dev_self[i] = self;
345 self->index = i;
346 /* Initialize Resource */
347 self->io.cfg_base = info->cfg_base;
348 self->io.fir_base = info->fir_base;
349 self->io.irq = info->irq;
350 self->io.fir_ext = CHIP_IO_EXTENT;
351 self->io.dma = info->dma;
352 self->io.dma2 = info->dma2;
353 self->io.fifo_size = 32;
354 self->chip_id = id;
355 self->st_fifo.len = 0;
356 self->RxDataReady = 0;
357
358 /* Reserve the ioports that we need */
359 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
360 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
361 __FUNCTION__, self->io.fir_base);
362 err = -ENODEV;
363 goto err_out1;
364 }
365
366 /* Initialize QoS for this device */
367 irda_init_max_qos_capabilies(&self->qos);
368
369 /* Check if user has supplied the dongle id or not */
370 if (!dongle_id)
371 dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
372 self->io.dongle_id = dongle_id;
373
374	/* The only value we must override is the baudrate */
375	/* Maximum speeds and capabilities are dongle-dependent. */
376 switch( self->io.dongle_id ){
377 case 0x0d:
378 self->qos.baud_rate.bits =
379 IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
380 IR_576000 | IR_1152000 | (IR_4000000 << 8);
381 break;
382 default:
383 self->qos.baud_rate.bits =
384 IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
385 break;
386 }
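	/*
	 * Note: baud_rate.bits is a 16-bit capability mask in which rates
	 * up to 1.152 Mb/s occupy the low byte; the (IR_4000000 << 8)
	 * above therefore sets the 4 Mb/s capability bit in the high byte
	 * for dongle id 0x0d.
	 */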
387
388 /* Following was used for testing:
389 *
390 * self->qos.baud_rate.bits = IR_9600;
391 *
392	 * It is no good, as it prohibits (error-prone) speed changes.
393 */
394
395 self->qos.min_turn_time.bits = qos_mtt_bits;
396 irda_qos_bits_to_value(&self->qos);
397
398 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
399 self->rx_buff.truesize = 14384 + 2048;
400 self->tx_buff.truesize = 14384 + 2048;
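	/*
	 * Worked example of the formula above: with data_size = 2048 and
	 * window_size = MAX_TX_WINDOW = 7, (2048 + 6) * 7 + 6 = 14384;
	 * the additional 2048 bytes appear to be extra headroom.
	 */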
401
402 /* Allocate memory if needed */
403 self->rx_buff.head =
404 dma_alloc_coherent(NULL, self->rx_buff.truesize,
405 &self->rx_buff_dma, GFP_KERNEL);
406 if (self->rx_buff.head == NULL) {
407 err = -ENOMEM;
408 goto err_out2;
409 }
410 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
411
412 self->tx_buff.head =
413 dma_alloc_coherent(NULL, self->tx_buff.truesize,
414 &self->tx_buff_dma, GFP_KERNEL);
415 if (self->tx_buff.head == NULL) {
416 err = -ENOMEM;
417 goto err_out3;
418 }
419 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
420
421 self->rx_buff.in_frame = FALSE;
422 self->rx_buff.state = OUTSIDE_FRAME;
423 self->tx_buff.data = self->tx_buff.head;
424 self->rx_buff.data = self->rx_buff.head;
425
426 /* Reset Tx queue info */
427 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
428 self->tx_fifo.tail = self->tx_buff.head;
429
430 /* Keep track of module usage */
431 SET_MODULE_OWNER(dev);
432
433 /* Override the network functions we need to use */
434 dev->hard_start_xmit = via_ircc_hard_xmit_sir;
435 dev->open = via_ircc_net_open;
436 dev->stop = via_ircc_net_close;
437 dev->do_ioctl = via_ircc_net_ioctl;
438 dev->get_stats = via_ircc_net_get_stats;
439
440 err = register_netdev(dev);
441 if (err)
442 goto err_out4;
443
444 IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);
445
446 /* Initialise the hardware..
447 */
448 self->io.speed = 9600;
449 via_hw_init(self);
450 return 0;
451 err_out4:
452 dma_free_coherent(NULL, self->tx_buff.truesize,
453 self->tx_buff.head, self->tx_buff_dma);
454 err_out3:
455 dma_free_coherent(NULL, self->rx_buff.truesize,
456 self->rx_buff.head, self->rx_buff_dma);
457 err_out2:
458 release_region(self->io.fir_base, self->io.fir_ext);
459 err_out1:
460 free_netdev(dev);
461 dev_self[i] = NULL;
462 return err;
463}
464
465/*
466 * Function via_ircc_close (self)
467 *
468 * Close driver instance
469 *
470 */
471static int via_ircc_close(struct via_ircc_cb *self)
472{
473 int iobase;
474
475 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
476
477 IRDA_ASSERT(self != NULL, return -1;);
478
479 iobase = self->io.fir_base;
480
481 ResetChip(iobase, 5); //hardware reset.
482 /* Remove netdevice */
483 unregister_netdev(self->netdev);
484
485 /* Release the PORT that this driver is using */
486 IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
487 __FUNCTION__, self->io.fir_base);
488 release_region(self->io.fir_base, self->io.fir_ext);
489 if (self->tx_buff.head)
490 dma_free_coherent(NULL, self->tx_buff.truesize,
491 self->tx_buff.head, self->tx_buff_dma);
492 if (self->rx_buff.head)
493 dma_free_coherent(NULL, self->rx_buff.truesize,
494 self->rx_buff.head, self->rx_buff_dma);
495 dev_self[self->index] = NULL;
496
497 free_netdev(self->netdev);
498
499 return 0;
500}
501
502/*
503 * Function via_hw_init(self)
504 *
505 *    Initialize the hardware to its default (SIR, 9600 baud) state.
506 *
507 * Formerly via_ircc_setup
508 */
509static void via_hw_init(struct via_ircc_cb *self)
510{
511 int iobase = self->io.fir_base;
512
513 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
514
515 SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
516 // FIFO Init
517 EnRXFIFOReadyInt(iobase, OFF);
518 EnRXFIFOHalfLevelInt(iobase, OFF);
519 EnTXFIFOHalfLevelInt(iobase, OFF);
520 EnTXFIFOUnderrunEOMInt(iobase, ON);
521 EnTXFIFOReadyInt(iobase, OFF);
522 InvertTX(iobase, OFF);
523 InvertRX(iobase, OFF);
524
525 if (ReadLPCReg(0x20) == 0x3c)
526 WriteLPCReg(0xF0, 0); // for VT1211
527 /* Int Init */
528 EnRXSpecInt(iobase, ON);
529
530 /* The following is basically hwreset */
531 /* If this is the case, why not just call hwreset() ? Jean II */
532 ResetChip(iobase, 5);
533 EnableDMA(iobase, OFF);
534 EnableTX(iobase, OFF);
535 EnableRX(iobase, OFF);
536 EnRXDMA(iobase, OFF);
537 EnTXDMA(iobase, OFF);
538 RXStart(iobase, OFF);
539 TXStart(iobase, OFF);
540 InitCard(iobase);
541 CommonInit(iobase);
542 SIRFilter(iobase, ON);
543 SetSIR(iobase, ON);
544 CRC16(iobase, ON);
545 EnTXCRC(iobase, 0);
546 WriteReg(iobase, I_ST_CT_0, 0x00);
547 SetBaudRate(iobase, 9600);
548 SetPulseWidth(iobase, 12);
549 SetSendPreambleCount(iobase, 0);
550
551 self->io.speed = 9600;
552 self->st_fifo.len = 0;
553
554 via_ircc_change_dongle_speed(iobase, self->io.speed,
555 self->io.dongle_id);
556
557 WriteReg(iobase, I_ST_CT_0, 0x80);
558}
559
560/*
561 * Function via_ircc_read_dongle_id (void)
562 *
563 */
564static int via_ircc_read_dongle_id(int iobase)
565{
566 int dongle_id = 9; /* Default to IBM */
567
568 IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
569 return dongle_id;
570}
571
572/*
573 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
574 *    Change the speed of the attached dongle
575 *    Only two types of dongle are currently implemented.
576 */
577static void via_ircc_change_dongle_speed(int iobase, int speed,
578 int dongle_id)
579{
580 u8 mode = 0;
581
582 /* speed is unused, as we use IsSIROn()/IsMIROn() */
583 speed = speed;
584
585 IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
586 __FUNCTION__, speed, iobase, dongle_id);
587
588 switch (dongle_id) {
589
590 /* Note: The dongle_id's listed here are derived from
591 * nsc-ircc.c */
592
593 case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
594 UseOneRX(iobase, ON); // use one RX pin RX1,RX2
595 InvertTX(iobase, OFF);
596 InvertRX(iobase, OFF);
597
598 EnRX2(iobase, ON); //sir to rx2
599 EnGPIOtoRX2(iobase, OFF);
600
601 if (IsSIROn(iobase)) { //sir
602 // Mode select Off
603 SlowIRRXLowActive(iobase, ON);
604 udelay(1000);
605 SlowIRRXLowActive(iobase, OFF);
606 } else {
607 if (IsMIROn(iobase)) { //mir
608 // Mode select On
609 SlowIRRXLowActive(iobase, OFF);
610 udelay(20);
611 } else { // fir
612 if (IsFIROn(iobase)) { //fir
613 // Mode select On
614 SlowIRRXLowActive(iobase, OFF);
615 udelay(20);
616 }
617 }
618 }
619 break;
620
621 case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
622 UseOneRX(iobase, ON); //use ONE RX....RX1
623 InvertTX(iobase, OFF);
624 InvertRX(iobase, OFF); // invert RX pin
625
626 EnRX2(iobase, ON);
627 EnGPIOtoRX2(iobase, OFF);
628 if (IsSIROn(iobase)) { //sir
629 // Mode select On
630 SlowIRRXLowActive(iobase, ON);
631 udelay(20);
632 // Mode select Off
633 SlowIRRXLowActive(iobase, OFF);
634 }
635 if (IsMIROn(iobase)) { //mir
636 // Mode select On
637 SlowIRRXLowActive(iobase, OFF);
638 udelay(20);
639 // Mode select Off
640 SlowIRRXLowActive(iobase, ON);
641 } else { // fir
642 if (IsFIROn(iobase)) { //fir
643 // Mode select On
644 SlowIRRXLowActive(iobase, OFF);
645 // TX On
646 WriteTX(iobase, ON);
647 udelay(20);
648 // Mode select OFF
649 SlowIRRXLowActive(iobase, ON);
650 udelay(20);
651 // TX Off
652 WriteTX(iobase, OFF);
653 }
654 }
655 break;
656
657 case 0x0d:
658 UseOneRX(iobase, OFF); // use two RX pin RX1,RX2
659 InvertTX(iobase, OFF);
660 InvertRX(iobase, OFF);
661 SlowIRRXLowActive(iobase, OFF);
662 if (IsSIROn(iobase)) { //sir
663 EnGPIOtoRX2(iobase, OFF);
664 WriteGIO(iobase, OFF);
665 EnRX2(iobase, OFF); //sir to rx2
666 } else { // fir mir
667 EnGPIOtoRX2(iobase, OFF);
668 WriteGIO(iobase, OFF);
669 EnRX2(iobase, OFF); //fir to rx
670 }
671 break;
672
673 case 0x11: /* Temic TFDS4500 */
674
675 IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __FUNCTION__);
676
677 UseOneRX(iobase, ON); //use ONE RX....RX1
678 InvertTX(iobase, OFF);
679 InvertRX(iobase, ON); // invert RX pin
680
681 EnRX2(iobase, ON); //sir to rx2
682 EnGPIOtoRX2(iobase, OFF);
683
684 if( IsSIROn(iobase) ){ //sir
685
686 // Mode select On
687 SlowIRRXLowActive(iobase, ON);
688 udelay(20);
689 // Mode select Off
690 SlowIRRXLowActive(iobase, OFF);
691
692 } else{
693 IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __FUNCTION__);
694 }
695 break;
696
697 case 0x0ff: /* Vishay */
698 if (IsSIROn(iobase))
699 mode = 0;
700 else if (IsMIROn(iobase))
701 mode = 1;
702 else if (IsFIROn(iobase))
703 mode = 2;
704 else if (IsVFIROn(iobase))
705 mode = 5; //VFIR-16
706 SI_SetMode(iobase, mode);
707 break;
708
709 default:
710 IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
711 __FUNCTION__, dongle_id);
712 }
713}
714
715/*
716 * Function via_ircc_change_speed (self, baud)
717 *
718 * Change the speed of the device
719 *
720 */
721static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
722{
723 struct net_device *dev = self->netdev;
724 u16 iobase;
725 u8 value = 0, bTmp;
726
727 iobase = self->io.fir_base;
728 /* Update accounting for new speed */
729 self->io.speed = speed;
730 IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __FUNCTION__, speed);
731
732 WriteReg(iobase, I_ST_CT_0, 0x0);
733
734	/* Controller mode selection */
735 switch (speed) {
736 case 2400:
737 case 9600:
738 case 19200:
739 case 38400:
740 case 57600:
741 case 115200:
742 value = (115200/speed)-1;
743 SetSIR(iobase, ON);
744 CRC16(iobase, ON);
745 break;
746 case 576000:
747 /* FIXME: this can't be right, as it's the same as 115200,
748 * and 576000 is MIR, not SIR. */
749 value = 0;
750 SetSIR(iobase, ON);
751 CRC16(iobase, ON);
752 break;
753 case 1152000:
754 value = 0;
755 SetMIR(iobase, ON);
756 /* FIXME: CRC ??? */
757 break;
758 case 4000000:
759 value = 0;
760 SetFIR(iobase, ON);
761 SetPulseWidth(iobase, 0);
762 SetSendPreambleCount(iobase, 14);
763 CRC16(iobase, OFF);
764 EnTXCRC(iobase, ON);
765 break;
766 case 16000000:
767 value = 0;
768 SetVFIR(iobase, ON);
769 /* FIXME: CRC ??? */
770 break;
771 default:
772 value = 0;
773 break;
774 }
775
776 /* Set baudrate to 0x19[2..7] */
777 bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
778 bTmp |= value << 2;
779 WriteReg(iobase, I_CF_H_1, bTmp);
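	/*
	 * For the SIR rates the divisor written above is (115200/speed) - 1,
	 * e.g. 9600 -> 11, 19200 -> 5 and 115200 -> 0, matching the table
	 * in SetBaudRate() in via-ircc.h.
	 */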
780
781 /* Some dongles may need to be informed about speed changes. */
782 via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
783
784 /* Set FIFO size to 64 */
785 SetFIFO(iobase, 64);
786
787 /* Enable IR */
788 WriteReg(iobase, I_ST_CT_0, 0x80);
789
790 // EnTXFIFOHalfLevelInt(iobase,ON);
791
792 /* Enable some interrupts so we can receive frames */
793 //EnAllInt(iobase,ON);
794
795 if (IsSIROn(iobase)) {
796 SIRFilter(iobase, ON);
797 SIRRecvAny(iobase, ON);
798 } else {
799 SIRFilter(iobase, OFF);
800 SIRRecvAny(iobase, OFF);
801 }
802
803 if (speed > 115200) {
804 /* Install FIR xmit handler */
805 dev->hard_start_xmit = via_ircc_hard_xmit_fir;
806 via_ircc_dma_receive(self);
807 } else {
808 /* Install SIR xmit handler */
809 dev->hard_start_xmit = via_ircc_hard_xmit_sir;
810 }
811 netif_wake_queue(dev);
812}
813
814/*
815 * Function via_ircc_hard_xmit (skb, dev)
816 *
817 * Transmit the frame!
818 *
819 */
820static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
821 struct net_device *dev)
822{
823 struct via_ircc_cb *self;
824 unsigned long flags;
825 u16 iobase;
826 __u32 speed;
827
828 self = (struct via_ircc_cb *) dev->priv;
829 IRDA_ASSERT(self != NULL, return 0;);
830 iobase = self->io.fir_base;
831
832 netif_stop_queue(dev);
833 /* Check if we need to change the speed */
834 speed = irda_get_next_speed(skb);
835 if ((speed != self->io.speed) && (speed != -1)) {
836 /* Check for empty frame */
837 if (!skb->len) {
838 via_ircc_change_speed(self, speed);
839 dev->trans_start = jiffies;
840 dev_kfree_skb(skb);
841 return 0;
842 } else
843 self->new_speed = speed;
844 }
845 InitCard(iobase);
846 CommonInit(iobase);
847 SIRFilter(iobase, ON);
848 SetSIR(iobase, ON);
849 CRC16(iobase, ON);
850 EnTXCRC(iobase, 0);
851 WriteReg(iobase, I_ST_CT_0, 0x00);
852
853 spin_lock_irqsave(&self->lock, flags);
854 self->tx_buff.data = self->tx_buff.head;
855 self->tx_buff.len =
856 async_wrap_skb(skb, self->tx_buff.data,
857 self->tx_buff.truesize);
858
859 self->stats.tx_bytes += self->tx_buff.len;
860	/* Send this frame at the old speed */
861 SetBaudRate(iobase, self->io.speed);
862 SetPulseWidth(iobase, 12);
863 SetSendPreambleCount(iobase, 0);
864 WriteReg(iobase, I_ST_CT_0, 0x80);
865
866 EnableTX(iobase, ON);
867 EnableRX(iobase, OFF);
868
869 ResetChip(iobase, 0);
870 ResetChip(iobase, 1);
871 ResetChip(iobase, 2);
872 ResetChip(iobase, 3);
873 ResetChip(iobase, 4);
874
875 EnAllInt(iobase, ON);
876 EnTXDMA(iobase, ON);
877 EnRXDMA(iobase, OFF);
878
879 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
880 DMA_TX_MODE);
881
882 SetSendByte(iobase, self->tx_buff.len);
883 RXStart(iobase, OFF);
884 TXStart(iobase, ON);
885
886 dev->trans_start = jiffies;
887 spin_unlock_irqrestore(&self->lock, flags);
888 dev_kfree_skb(skb);
889 return 0;
890}
891
892static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
893 struct net_device *dev)
894{
895 struct via_ircc_cb *self;
896 u16 iobase;
897 __u32 speed;
898 unsigned long flags;
899
900 self = (struct via_ircc_cb *) dev->priv;
901 iobase = self->io.fir_base;
902
903 if (self->st_fifo.len)
904 return 0;
905 if (self->chip_id == 0x3076)
906 iodelay(1500);
907 else
908 udelay(1500);
909 netif_stop_queue(dev);
910 speed = irda_get_next_speed(skb);
911 if ((speed != self->io.speed) && (speed != -1)) {
912 if (!skb->len) {
913 via_ircc_change_speed(self, speed);
914 dev->trans_start = jiffies;
915 dev_kfree_skb(skb);
916 return 0;
917 } else
918 self->new_speed = speed;
919 }
920 spin_lock_irqsave(&self->lock, flags);
921 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
922 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
923
924 self->tx_fifo.tail += skb->len;
925 self->stats.tx_bytes += skb->len;
926 memcpy(self->tx_fifo.queue[self->tx_fifo.free].start, skb->data,
927 skb->len);
928 self->tx_fifo.len++;
929 self->tx_fifo.free++;
930//F01 if (self->tx_fifo.len == 1) {
931 via_ircc_dma_xmit(self, iobase);
932//F01 }
933//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
934 dev->trans_start = jiffies;
935 dev_kfree_skb(skb);
936 spin_unlock_irqrestore(&self->lock, flags);
937 return 0;
938
939}
940
941static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
942{
943 EnTXDMA(iobase, OFF);
944 self->io.direction = IO_XMIT;
945 EnPhys(iobase, ON);
946 EnableTX(iobase, ON);
947 EnableRX(iobase, OFF);
948 ResetChip(iobase, 0);
949 ResetChip(iobase, 1);
950 ResetChip(iobase, 2);
951 ResetChip(iobase, 3);
952 ResetChip(iobase, 4);
953 EnAllInt(iobase, ON);
954 EnTXDMA(iobase, ON);
955 EnRXDMA(iobase, OFF);
956 irda_setup_dma(self->io.dma,
957 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
958 self->tx_buff.head) + self->tx_buff_dma,
959 self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
960 IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
961 __FUNCTION__, self->tx_fifo.ptr,
962 self->tx_fifo.queue[self->tx_fifo.ptr].len,
963 self->tx_fifo.len);
964
965 SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
966 RXStart(iobase, OFF);
967 TXStart(iobase, ON);
968 return 0;
969
970}
971
972/*
973 * Function via_ircc_dma_xmit_complete (self)
974 *
975 *    The transfer of a frame is finished. This function will only be
976 *    called by the interrupt handler.
977 *
978 */
979static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
980{
981 int iobase;
982 int ret = TRUE;
983 u8 Tx_status;
984
985 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
986
987 iobase = self->io.fir_base;
988 /* Disable DMA */
989// DisableDmaChannel(self->io.dma);
990	/* Check for underrun! */
991 /* Clear bit, by writing 1 into it */
992 Tx_status = GetTXStatus(iobase);
993 if (Tx_status & 0x08) {
994 self->stats.tx_errors++;
995 self->stats.tx_fifo_errors++;
996 hwreset(self);
997//	how to clear underrun?
998 } else {
999 self->stats.tx_packets++;
1000 ResetChip(iobase, 3);
1001 ResetChip(iobase, 4);
1002 }
1003 /* Check if we need to change the speed */
1004 if (self->new_speed) {
1005 via_ircc_change_speed(self, self->new_speed);
1006 self->new_speed = 0;
1007 }
1008
1009 /* Finished with this frame, so prepare for next */
1010 if (IsFIROn(iobase)) {
1011 if (self->tx_fifo.len) {
1012 self->tx_fifo.len--;
1013 self->tx_fifo.ptr++;
1014 }
1015 }
1016 IRDA_DEBUG(1,
1017 "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
1018 __FUNCTION__,
1019 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
1020/* F01_S
1021 // Any frames to be sent back-to-back?
1022 if (self->tx_fifo.len) {
1023 // Not finished yet!
1024 via_ircc_dma_xmit(self, iobase);
1025 ret = FALSE;
1026 } else {
1027F01_E*/
1028 // Reset Tx FIFO info
1029 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1030 self->tx_fifo.tail = self->tx_buff.head;
1031//F01 }
1032
1033 // Make sure we have room for more frames
1034//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
1035 // Not busy transmitting anymore
1036 // Tell the network layer, that we can accept more frames
1037 netif_wake_queue(self->netdev);
1038//F01 }
1039 return ret;
1040}
1041
1042/*
1043 * Function via_ircc_dma_receive (self)
1044 *
1045 *    Set up the controller to receive a frame.
1046 *
1047 */
1048static int via_ircc_dma_receive(struct via_ircc_cb *self)
1049{
1050 int iobase;
1051
1052 iobase = self->io.fir_base;
1053
1054 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
1055
1056 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1057 self->tx_fifo.tail = self->tx_buff.head;
1058 self->RxDataReady = 0;
1059 self->io.direction = IO_RECV;
1060 self->rx_buff.data = self->rx_buff.head;
1061 self->st_fifo.len = self->st_fifo.pending_bytes = 0;
1062 self->st_fifo.tail = self->st_fifo.head = 0;
1063
1064 EnPhys(iobase, ON);
1065 EnableTX(iobase, OFF);
1066 EnableRX(iobase, ON);
1067
1068 ResetChip(iobase, 0);
1069 ResetChip(iobase, 1);
1070 ResetChip(iobase, 2);
1071 ResetChip(iobase, 3);
1072 ResetChip(iobase, 4);
1073
1074 EnAllInt(iobase, ON);
1075 EnTXDMA(iobase, OFF);
1076 EnRXDMA(iobase, ON);
1077 irda_setup_dma(self->io.dma2, self->rx_buff_dma,
1078 self->rx_buff.truesize, DMA_RX_MODE);
1079 TXStart(iobase, OFF);
1080 RXStart(iobase, ON);
1081
1082 return 0;
1083}
1084
1085/*
1086 * Function via_ircc_dma_receive_complete (self)
1087 *
1088 *    The controller has finished receiving frames; this routine is
1089 *    called by the ISR.
1090 *
1091 */
1092static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1093 int iobase)
1094{
1095 struct st_fifo *st_fifo;
1096 struct sk_buff *skb;
1097 int len, i;
1098 u8 status = 0;
1099
1100 iobase = self->io.fir_base;
1101 st_fifo = &self->st_fifo;
1102
1103 if (self->io.speed < 4000000) { //Speed below FIR
1104 len = GetRecvByte(iobase, self);
1105 skb = dev_alloc_skb(len + 1);
1106 if (skb == NULL)
1107 return FALSE;
1108 // Make sure IP header gets aligned
1109 skb_reserve(skb, 1);
1110 skb_put(skb, len - 2);
1111 if (self->chip_id == 0x3076) {
1112 for (i = 0; i < len - 2; i++)
1113 skb->data[i] = self->rx_buff.data[i * 2];
1114 } else {
1115 if (self->chip_id == 0x3096) {
1116 for (i = 0; i < len - 2; i++)
1117 skb->data[i] =
1118 self->rx_buff.data[i];
1119 }
1120 }
1121 // Move to next frame
1122 self->rx_buff.data += len;
1123 self->stats.rx_bytes += len;
1124 self->stats.rx_packets++;
1125 skb->dev = self->netdev;
1126 skb->mac.raw = skb->data;
1127 skb->protocol = htons(ETH_P_IRDA);
1128 netif_rx(skb);
1129 return TRUE;
1130 }
1131
1132 else { //FIR mode
1133 len = GetRecvByte(iobase, self);
1134 if (len == 0)
1135			return TRUE;	//interrupt only, data may have been moved by the Rx timer
1136 if (((len - 4) < 2) || ((len - 4) > 2048)) {
1137 IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
1138 __FUNCTION__, len, RxCurCount(iobase, self),
1139 self->RxLastCount);
1140 hwreset(self);
1141 return FALSE;
1142 }
1143 IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
1144 __FUNCTION__,
1145 st_fifo->len, len - 4, RxCurCount(iobase, self));
1146
1147 st_fifo->entries[st_fifo->tail].status = status;
1148 st_fifo->entries[st_fifo->tail].len = len;
1149 st_fifo->pending_bytes += len;
1150 st_fifo->tail++;
1151 st_fifo->len++;
1152 if (st_fifo->tail > MAX_RX_WINDOW)
1153 st_fifo->tail = 0;
1154 self->RxDataReady = 0;
1155
1156		// Up to MAX_RX_WINDOW frames may have been received by
1157		// receive_complete before the timer IRQ fires.
1158/* F01_S
1159 if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
1160 RXStart(iobase,ON);
1161 SetTimer(iobase,4);
1162 }
1163 else {
1164F01_E */
1165 EnableRX(iobase, OFF);
1166 EnRXDMA(iobase, OFF);
1167 RXStart(iobase, OFF);
1168//F01_S
1169 // Put this entry back in fifo
1170 if (st_fifo->head > MAX_RX_WINDOW)
1171 st_fifo->head = 0;
1172 status = st_fifo->entries[st_fifo->head].status;
1173 len = st_fifo->entries[st_fifo->head].len;
1174 st_fifo->head++;
1175 st_fifo->len--;
1176
1177 skb = dev_alloc_skb(len + 1 - 4);
1178 /*
1179		 * if frame size, data ptr, or skb ptr are wrong, then get
1180		 * the next entry.
1181 */
1182 if ((skb == NULL) || (skb->data == NULL)
1183 || (self->rx_buff.data == NULL) || (len < 6)) {
1184 self->stats.rx_dropped++;
1185 return TRUE;
1186 }
1187 skb_reserve(skb, 1);
1188 skb_put(skb, len - 4);
1189
1190 memcpy(skb->data, self->rx_buff.data, len - 4);
1191 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __FUNCTION__,
1192 len - 4, self->rx_buff.data);
1193
1194 // Move to next frame
1195 self->rx_buff.data += len;
1196 self->stats.rx_bytes += len;
1197 self->stats.rx_packets++;
1198 skb->dev = self->netdev;
1199 skb->mac.raw = skb->data;
1200 skb->protocol = htons(ETH_P_IRDA);
1201 netif_rx(skb);
1202
1203//F01_E
1204 } //FIR
1205 return TRUE;
1206
1207}
1208
1209/*
1210 * If a frame has been received but no interrupt was raised, use this routine to upload the frame.
1211 */
1212static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1213{
1214 struct sk_buff *skb;
1215 int len;
1216 struct st_fifo *st_fifo;
1217 st_fifo = &self->st_fifo;
1218
1219 len = GetRecvByte(iobase, self);
1220
1221 IRDA_DEBUG(2, "%s(): len=%x\n", __FUNCTION__, len);
1222
1223 skb = dev_alloc_skb(len + 1);
1224 if ((skb == NULL) || ((len - 4) < 2)) {
1225 self->stats.rx_dropped++;
1226 return FALSE;
1227 }
1228 skb_reserve(skb, 1);
1229 skb_put(skb, len - 4 + 1);
1230 memcpy(skb->data, self->rx_buff.data, len - 4 + 1);
1231 st_fifo->tail++;
1232 st_fifo->len++;
1233 if (st_fifo->tail > MAX_RX_WINDOW)
1234 st_fifo->tail = 0;
1235 // Move to next frame
1236 self->rx_buff.data += len;
1237 self->stats.rx_bytes += len;
1238 self->stats.rx_packets++;
1239 skb->dev = self->netdev;
1240 skb->mac.raw = skb->data;
1241 skb->protocol = htons(ETH_P_IRDA);
1242 netif_rx(skb);
1243 if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
1244 RXStart(iobase, ON);
1245 } else {
1246 EnableRX(iobase, OFF);
1247 EnRXDMA(iobase, OFF);
1248 RXStart(iobase, OFF);
1249 }
1250 return TRUE;
1251}
1252
1253/*
1254 * Implements back-to-back receive; use this routine to upload data.
1255 */
1256
1257static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1258{
1259 struct st_fifo *st_fifo;
1260 struct sk_buff *skb;
1261 int len;
1262 u8 status;
1263
1264 st_fifo = &self->st_fifo;
1265
1266 if (CkRxRecv(iobase, self)) {
1267		// if still receiving, then return and don't upload the frame yet
1268 self->RetryCount = 0;
1269 SetTimer(iobase, 20);
1270 self->RxDataReady++;
1271 return FALSE;
1272 } else
1273 self->RetryCount++;
1274
1275 if ((self->RetryCount >= 1) ||
1276 ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize)
1277 || (st_fifo->len >= (MAX_RX_WINDOW))) {
1278 while (st_fifo->len > 0) { //upload frame
1279 // Put this entry back in fifo
1280 if (st_fifo->head > MAX_RX_WINDOW)
1281 st_fifo->head = 0;
1282 status = st_fifo->entries[st_fifo->head].status;
1283 len = st_fifo->entries[st_fifo->head].len;
1284 st_fifo->head++;
1285 st_fifo->len--;
1286
1287 skb = dev_alloc_skb(len + 1 - 4);
1288 /*
1289 * if frame size, data ptr, or skb ptr are wrong,
1290 * then get next entry.
1291 */
1292 if ((skb == NULL) || (skb->data == NULL)
1293 || (self->rx_buff.data == NULL) || (len < 6)) {
1294 self->stats.rx_dropped++;
1295 continue;
1296 }
1297 skb_reserve(skb, 1);
1298 skb_put(skb, len - 4);
1299 memcpy(skb->data, self->rx_buff.data, len - 4);
1300
1301 IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __FUNCTION__,
1302 len - 4, st_fifo->head);
1303
1304 // Move to next frame
1305 self->rx_buff.data += len;
1306 self->stats.rx_bytes += len;
1307 self->stats.rx_packets++;
1308 skb->dev = self->netdev;
1309 skb->mac.raw = skb->data;
1310 skb->protocol = htons(ETH_P_IRDA);
1311 netif_rx(skb);
1312 } //while
1313 self->RetryCount = 0;
1314
1315 IRDA_DEBUG(2,
1316 "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
1317 __FUNCTION__,
1318 GetHostStatus(iobase), GetRXStatus(iobase));
1319
1320 /*
1321		 * If a frame completed reception during this routine, then
1322		 * upload the frame.
1323 */
1324 if ((GetRXStatus(iobase) & 0x10)
1325 && (RxCurCount(iobase, self) != self->RxLastCount)) {
1326 upload_rxdata(self, iobase);
1327 if (irda_device_txqueue_empty(self->netdev))
1328 via_ircc_dma_receive(self);
1329 }
1330 } // timer detect complete
1331 else
1332 SetTimer(iobase, 4);
1333 return TRUE;
1334
1335}
1336
1337
1338
1339/*
1340 * Function via_ircc_interrupt (irq, dev_id, regs)
1341 *
1342 * An interrupt from the chip has arrived. Time to do some work
1343 *
1344 */
1345static irqreturn_t via_ircc_interrupt(int irq, void *dev_id,
1346 struct pt_regs *regs)
1347{
1348 struct net_device *dev = (struct net_device *) dev_id;
1349 struct via_ircc_cb *self;
1350 int iobase;
1351 u8 iHostIntType, iRxIntType, iTxIntType;
1352
1353 if (!dev) {
1354 IRDA_WARNING("%s: irq %d for unknown device.\n", driver_name,
1355 irq);
1356 return IRQ_NONE;
1357 }
1358 self = (struct via_ircc_cb *) dev->priv;
1359 iobase = self->io.fir_base;
1360 spin_lock(&self->lock);
1361 iHostIntType = GetHostStatus(iobase);
1362
1363 IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
1364 __FUNCTION__, iHostIntType,
1365 (iHostIntType & 0x40) ? "Timer" : "",
1366 (iHostIntType & 0x20) ? "Tx" : "",
1367 (iHostIntType & 0x10) ? "Rx" : "",
1368 (iHostIntType & 0x0e) >> 1);
1369
1370 if ((iHostIntType & 0x40) != 0) { //Timer Event
1371 self->EventFlag.TimeOut++;
1372 ClearTimerInt(iobase, 1);
1373 if (self->io.direction == IO_XMIT) {
1374 via_ircc_dma_xmit(self, iobase);
1375 }
1376 if (self->io.direction == IO_RECV) {
1377 /*
1378			 * Frame has been ready too long without being uploaded; must reset.
1379 */
1380 if (self->RxDataReady > 30) {
1381 hwreset(self);
1382 if (irda_device_txqueue_empty(self->netdev)) {
1383 via_ircc_dma_receive(self);
1384 }
1385 } else { // call this to upload frame.
1386 RxTimerHandler(self, iobase);
1387 }
1388 } //RECV
1389 } //Timer Event
1390 if ((iHostIntType & 0x20) != 0) { //Tx Event
1391 iTxIntType = GetTXStatus(iobase);
1392
1393 IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
1394 __FUNCTION__, iTxIntType,
1395 (iTxIntType & 0x08) ? "FIFO underr." : "",
1396 (iTxIntType & 0x04) ? "EOM" : "",
1397 (iTxIntType & 0x02) ? "FIFO ready" : "",
1398 (iTxIntType & 0x01) ? "Early EOM" : "");
1399
1400 if (iTxIntType & 0x4) {
1401 self->EventFlag.EOMessage++; // read and will auto clean
1402 if (via_ircc_dma_xmit_complete(self)) {
1403 if (irda_device_txqueue_empty
1404 (self->netdev)) {
1405 via_ircc_dma_receive(self);
1406 }
1407 } else {
1408 self->EventFlag.Unknown++;
1409 }
1410 } //EOP
1411 } //Tx Event
1412 //----------------------------------------
1413 if ((iHostIntType & 0x10) != 0) { //Rx Event
1414 /* Check if DMA has finished */
1415 iRxIntType = GetRXStatus(iobase);
1416
1417 IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
1418 __FUNCTION__, iRxIntType,
1419 (iRxIntType & 0x80) ? "PHY err." : "",
1420 (iRxIntType & 0x40) ? "CRC err" : "",
1421 (iRxIntType & 0x20) ? "FIFO overr." : "",
1422 (iRxIntType & 0x10) ? "EOF" : "",
1423 (iRxIntType & 0x08) ? "RxData" : "",
1424 (iRxIntType & 0x02) ? "RxMaxLen" : "",
1425 (iRxIntType & 0x01) ? "SIR bad" : "");
1426 if (!iRxIntType)
1427 IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __FUNCTION__);
1428
1429 if (iRxIntType & 0x10) {
1430 if (via_ircc_dma_receive_complete(self, iobase)) {
1431//F01 if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
1432 via_ircc_dma_receive(self);
1433 }
1434 } // No ERR
1435 else { //ERR
1436 IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
1437 __FUNCTION__, iRxIntType, iHostIntType,
1438 RxCurCount(iobase, self),
1439 self->RxLastCount);
1440
1441 if (iRxIntType & 0x20) { //FIFO OverRun ERR
1442 ResetChip(iobase, 0);
1443 ResetChip(iobase, 1);
1444 } else { //PHY,CRC ERR
1445
1446 if (iRxIntType != 0x08)
1447 hwreset(self); //F01
1448 }
1449 via_ircc_dma_receive(self);
1450 } //ERR
1451
1452 } //Rx Event
1453 spin_unlock(&self->lock);
1454 return IRQ_RETVAL(iHostIntType);
1455}
1456
1457static void hwreset(struct via_ircc_cb *self)
1458{
1459 int iobase;
1460 iobase = self->io.fir_base;
1461
1462 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
1463
1464 ResetChip(iobase, 5);
1465 EnableDMA(iobase, OFF);
1466 EnableTX(iobase, OFF);
1467 EnableRX(iobase, OFF);
1468 EnRXDMA(iobase, OFF);
1469 EnTXDMA(iobase, OFF);
1470 RXStart(iobase, OFF);
1471 TXStart(iobase, OFF);
1472 InitCard(iobase);
1473 CommonInit(iobase);
1474 SIRFilter(iobase, ON);
1475 SetSIR(iobase, ON);
1476 CRC16(iobase, ON);
1477 EnTXCRC(iobase, 0);
1478 WriteReg(iobase, I_ST_CT_0, 0x00);
1479 SetBaudRate(iobase, 9600);
1480 SetPulseWidth(iobase, 12);
1481 SetSendPreambleCount(iobase, 0);
1482 WriteReg(iobase, I_ST_CT_0, 0x80);
1483
1484 /* Restore speed. */
1485 via_ircc_change_speed(self, self->io.speed);
1486
1487 self->st_fifo.len = 0;
1488}
1489
1490/*
1491 * Function via_ircc_is_receiving (self)
1492 *
1493 *    Return TRUE if we are currently receiving a frame
1494 *
1495 */
1496static int via_ircc_is_receiving(struct via_ircc_cb *self)
1497{
1498 int status = FALSE;
1499 int iobase;
1500
1501 IRDA_ASSERT(self != NULL, return FALSE;);
1502
1503 iobase = self->io.fir_base;
1504 if (CkRxRecv(iobase, self))
1505 status = TRUE;
1506
1507 IRDA_DEBUG(2, "%s(): status=%x....\n", __FUNCTION__, status);
1508
1509 return status;
1510}
1511
1512
1513/*
1514 * Function via_ircc_net_open (dev)
1515 *
1516 * Start the device
1517 *
1518 */
1519static int via_ircc_net_open(struct net_device *dev)
1520{
1521 struct via_ircc_cb *self;
1522 int iobase;
1523 char hwname[32];
1524
1525 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
1526
1527 IRDA_ASSERT(dev != NULL, return -1;);
1528 self = (struct via_ircc_cb *) dev->priv;
1529 self->stats.rx_packets = 0;
1530 IRDA_ASSERT(self != NULL, return 0;);
1531 iobase = self->io.fir_base;
1532 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1533 IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1534 self->io.irq);
1535 return -EAGAIN;
1536 }
1537 /*
1538 * Always allocate the DMA channel after the IRQ, and clean up on
1539 * failure.
1540 */
1541 if (request_dma(self->io.dma, dev->name)) {
1542 IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1543 self->io.dma);
1544 free_irq(self->io.irq, self);
1545 return -EAGAIN;
1546 }
1547 if (self->io.dma2 != self->io.dma) {
1548 if (request_dma(self->io.dma2, dev->name)) {
1549 IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1550 driver_name, self->io.dma2);
1551 free_irq(self->io.irq, self);
1552 return -EAGAIN;
1553 }
1554 }
1555
1556
1557 /* turn on interrupts */
1558 EnAllInt(iobase, ON);
1559 EnInternalLoop(iobase, OFF);
1560 EnExternalLoop(iobase, OFF);
1561
1562 /* */
1563 via_ircc_dma_receive(self);
1564
1565 /* Ready to play! */
1566 netif_start_queue(dev);
1567
1568 /*
1569 * Open new IrLAP layer instance, now that everything should be
1570 * initialized properly
1571 */
1572 sprintf(hwname, "VIA @ 0x%x", iobase);
1573 self->irlap = irlap_open(dev, &self->qos, hwname);
1574
1575 self->RxLastCount = 0;
1576
1577 return 0;
1578}
1579
1580/*
1581 * Function via_ircc_net_close (dev)
1582 *
1583 * Stop the device
1584 *
1585 */
1586static int via_ircc_net_close(struct net_device *dev)
1587{
1588 struct via_ircc_cb *self;
1589 int iobase;
1590
1591 IRDA_DEBUG(3, "%s()\n", __FUNCTION__);
1592
1593 IRDA_ASSERT(dev != NULL, return -1;);
1594 self = (struct via_ircc_cb *) dev->priv;
1595 IRDA_ASSERT(self != NULL, return 0;);
1596
1597 /* Stop device */
1598 netif_stop_queue(dev);
1599 /* Stop and remove instance of IrLAP */
1600 if (self->irlap)
1601 irlap_close(self->irlap);
1602 self->irlap = NULL;
1603 iobase = self->io.fir_base;
1604 EnTXDMA(iobase, OFF);
1605 EnRXDMA(iobase, OFF);
1606 DisableDmaChannel(self->io.dma);
1607
1608 /* Disable interrupts */
1609 EnAllInt(iobase, OFF);
1610 free_irq(self->io.irq, dev);
1611 free_dma(self->io.dma);
1612
1613 return 0;
1614}
1615
1616/*
1617 * Function via_ircc_net_ioctl (dev, rq, cmd)
1618 *
1619 * Process IOCTL commands for this device
1620 *
1621 */
1622static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1623 int cmd)
1624{
1625 struct if_irda_req *irq = (struct if_irda_req *) rq;
1626 struct via_ircc_cb *self;
1627 unsigned long flags;
1628 int ret = 0;
1629
1630 IRDA_ASSERT(dev != NULL, return -1;);
1631 self = dev->priv;
1632 IRDA_ASSERT(self != NULL, return -1;);
1633 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name,
1634 cmd);
1635 /* Disable interrupts & save flags */
1636 spin_lock_irqsave(&self->lock, flags);
1637 switch (cmd) {
1638 case SIOCSBANDWIDTH: /* Set bandwidth */
1639 if (!capable(CAP_NET_ADMIN)) {
1640 ret = -EPERM;
1641 goto out;
1642 }
1643 via_ircc_change_speed(self, irq->ifr_baudrate);
1644 break;
1645 case SIOCSMEDIABUSY: /* Set media busy */
1646 if (!capable(CAP_NET_ADMIN)) {
1647 ret = -EPERM;
1648 goto out;
1649 }
1650 irda_device_set_media_busy(self->netdev, TRUE);
1651 break;
1652 case SIOCGRECEIVING: /* Check if we are receiving right now */
1653 irq->ifr_receiving = via_ircc_is_receiving(self);
1654 break;
1655 default:
1656 ret = -EOPNOTSUPP;
1657 }
1658 out:
1659 spin_unlock_irqrestore(&self->lock, flags);
1660 return ret;
1661}
1662
1663static struct net_device_stats *via_ircc_net_get_stats(struct net_device
1664 *dev)
1665{
1666 struct via_ircc_cb *self = (struct via_ircc_cb *) dev->priv;
1667
1668 return &self->stats;
1669}
1670
1671MODULE_AUTHOR("VIA Technologies,inc");
1672MODULE_DESCRIPTION("VIA IrDA Device Driver");
1673MODULE_LICENSE("GPL");
1674
1675module_init(via_ircc_init);
1676module_exit(via_ircc_cleanup);
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
new file mode 100644
index 000000000000..204b1b34ffc7
--- /dev/null
+++ b/drivers/net/irda/via-ircc.h
@@ -0,0 +1,853 @@
1/*********************************************************************
2 *
3 * Filename: via-ircc.h
4 * Version: 1.0
5 * Description: Driver for the VIA VT8231/VT8233 IrDA chipsets
6 * Author: VIA Technologies, inc
7 * Date : 08/06/2003
8
9Copyright (c) 1998-2003 VIA Technologies, Inc.
10
11This program is free software; you can redistribute it and/or modify it under
12the terms of the GNU General Public License as published by the Free Software
13Foundation; either version 2, or (at your option) any later version.
14
15This program is distributed in the hope that it will be useful, but WITHOUT
16ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
17MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
18See the GNU General Public License for more details.
19
20You should have received a copy of the GNU General Public License along with
21this program; if not, write to the Free Software Foundation, Inc.,
2259 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 * Comment:
25 * jul/08/2002 : Rx buffer length should use Rx ring ptr.
26 * Oct/28/2002 : Add SB id for 3147 and 3177.
27 * jul/09/2002 : only two kinds of dongle are implemented currently.
28 * Oct/02/2002 : works on VT8231 and VT8233.
29 * Aug/06/2003 : changed driver format to a PCI driver.
30 ********************************************************************/
31#ifndef via_IRCC_H
32#define via_IRCC_H
33#include <linux/time.h>
34#include <linux/spinlock.h>
35#include <linux/pm.h>
36#include <linux/types.h>
37#include <asm/io.h>
38
39#define MAX_TX_WINDOW 7
40#define MAX_RX_WINDOW 7
41
42struct st_fifo_entry {
43 int status;
44 int len;
45};
46
47struct st_fifo {
48 struct st_fifo_entry entries[MAX_RX_WINDOW + 2];
49 int pending_bytes;
50 int head;
51 int tail;
52 int len;
53};
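/*
 * The st_fifo is a small ring of per-frame (status, length) records filled
 * by the receive path; head and tail wrap back to 0 once they exceed
 * MAX_RX_WINDOW, so not all MAX_RX_WINDOW + 2 slots are in use at once.
 */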
54
55struct frame_cb {
56 void *start; /* Start of frame in DMA mem */
57	int len;          /* Length of frame in DMA mem */
58};
59
60struct tx_fifo {
61 struct frame_cb queue[MAX_TX_WINDOW + 2]; /* Info about frames in queue */
62 int ptr; /* Currently being sent */
63	int len;          /* Length of queue */
64 int free; /* Next free slot */
65 void *tail; /* Next free start in DMA mem */
66};
67
68
69struct eventflag // for keeping track of Interrupt Events
70{
71 //--------tx part
72 unsigned char TxFIFOUnderRun;
73 unsigned char EOMessage;
74 unsigned char TxFIFOReady;
75 unsigned char EarlyEOM;
76 //--------rx part
77 unsigned char PHYErr;
78 unsigned char CRCErr;
79 unsigned char RxFIFOOverRun;
80 unsigned char EOPacket;
81 unsigned char RxAvail;
82 unsigned char TooLargePacket;
83 unsigned char SIRBad;
84 //--------unknown
85 unsigned char Unknown;
86 //----------
87 unsigned char TimeOut;
88 unsigned char RxDMATC;
89 unsigned char TxDMATC;
90};
91
92/* Private data for each instance */
93struct via_ircc_cb {
94 struct st_fifo st_fifo; /* Info about received frames */
95 struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
96
97 struct net_device *netdev; /* Yes! we are some kind of netdevice */
98 struct net_device_stats stats;
99
100	struct irlap_cb *irlap;	/* The link layer we are bound to */
101 struct qos_info qos; /* QoS capabilities for this device */
102
103 chipio_t io; /* IrDA controller information */
104 iobuff_t tx_buff; /* Transmit buffer */
105 iobuff_t rx_buff; /* Receive buffer */
106 dma_addr_t tx_buff_dma;
107 dma_addr_t rx_buff_dma;
108
109 __u8 ier; /* Interrupt enable register */
110
111 struct timeval stamp;
112 struct timeval now;
113
114 spinlock_t lock; /* For serializing operations */
115
116 __u32 flags; /* Interface flags */
117 __u32 new_speed;
118 int index; /* Instance index */
119
120 struct eventflag EventFlag;
121 struct pm_dev *dev;
122 unsigned int chip_id; /* to remember chip id */
123 unsigned int RetryCount;
124 unsigned int RxDataReady;
125 unsigned int RxLastCount;
126};
127
128
129//---------I=Infrared, H=Host, M=Misc, T=Tx, R=Rx, ST=Status,
130// CF=Config, CT=Control, L=Low, H=High, C=Count
131#define I_CF_L_0 0x10
132#define I_CF_H_0 0x11
133#define I_SIR_BOF 0x12
134#define I_SIR_EOF 0x13
135#define I_ST_CT_0 0x15
136#define I_ST_L_1 0x16
137#define I_ST_H_1 0x17
138#define I_CF_L_1 0x18
139#define I_CF_H_1 0x19
140#define I_CF_L_2 0x1a
141#define I_CF_H_2 0x1b
142#define I_CF_3 0x1e
143#define H_CT 0x20
144#define H_ST 0x21
145#define M_CT 0x22
146#define TX_CT_1 0x23
147#define TX_CT_2 0x24
148#define TX_ST 0x25
149#define RX_CT 0x26
150#define RX_ST 0x27
151#define RESET 0x28
152#define P_ADDR 0x29
153#define RX_C_L 0x2a
154#define RX_C_H 0x2b
155#define RX_P_L 0x2c
156#define RX_P_H 0x2d
157#define TX_C_L 0x2e
158#define TX_C_H 0x2f
159#define TIMER 0x32
160#define I_CF_4 0x33
161#define I_T_C_L 0x34
162#define I_T_C_H 0x35
163#define VERSION 0x3f
164//-------------------------------
165#define StartAddr 0x10 // the first register address
166#define EndAddr 0x3f // the last register address
167#define GetBit(val,bit) val = (unsigned char) ((val>>bit) & 0x1)
168 // Returns the bit
169#define SetBit(val,bit) val= (unsigned char ) (val | (0x1 << bit))
170 // Sets bit to 1
171#define ResetBit(val,bit) val= (unsigned char ) (val & ~(0x1 << bit))
172 // Sets bit to 0
173
174#define OFF 0
175#define ON 1
176#define DMA_TX_MODE 0x08
177#define DMA_RX_MODE 0x04
178
179#define DMA1 0
180#define DMA2 0xc0
181#define MASK1 DMA1+0x0a
182#define MASK2 DMA2+0x14
183
184#define Clk_bit 0x40
185#define Tx_bit 0x01
186#define Rd_Valid 0x08
187#define RxBit 0x08
188
189static void DisableDmaChannel(unsigned int channel)
190{
191 switch (channel) { // 8 Bit DMA channels DMAC1
192 case 0:
193 outb(4, MASK1); //mask channel 0
194 break;
195 case 1:
196 outb(5, MASK1); //Mask channel 1
197 break;
198 case 2:
199 outb(6, MASK1); //Mask channel 2
200 break;
201 case 3:
202 outb(7, MASK1); //Mask channel 3
203 break;
204 case 5:
205 outb(5, MASK2); //Mask channel 5
206 break;
207 case 6:
208 outb(6, MASK2); //Mask channel 6
209 break;
210 case 7:
211 outb(7, MASK2); //Mask channel 7
212 break;
213 default:
214 break;
215 }; //Switch
216}
217
218static unsigned char ReadLPCReg(int iRegNum)
219{
220 unsigned char iVal;
221
222 outb(0x87, 0x2e);
223 outb(0x87, 0x2e);
224 outb(iRegNum, 0x2e);
225 iVal = inb(0x2f);
226 outb(0xaa, 0x2e);
227
228 return iVal;
229}
230
231static void WriteLPCReg(int iRegNum, unsigned char iVal)
232{
233
234 outb(0x87, 0x2e);
235 outb(0x87, 0x2e);
236 outb(iRegNum, 0x2e);
237 outb(iVal, 0x2f);
238 outb(0xAA, 0x2e);
239}
240
241static __u8 ReadReg(unsigned int BaseAddr, int iRegNum)
242{
243 return ((__u8) inb(BaseAddr + iRegNum));
244}
245
246static void WriteReg(unsigned int BaseAddr, int iRegNum, unsigned char iVal)
247{
248 outb(iVal, BaseAddr + iRegNum);
249}
250
251static int WriteRegBit(unsigned int BaseAddr, unsigned char RegNum,
252 unsigned char BitPos, unsigned char value)
253{
254 __u8 Rtemp, Wtemp;
255
256 if (BitPos > 7) {
257 return -1;
258 }
259 if ((RegNum < StartAddr) || (RegNum > EndAddr))
260 return -1;
261 Rtemp = ReadReg(BaseAddr, RegNum);
262 if (value == 0)
263 Wtemp = ResetBit(Rtemp, BitPos);
264 else {
265 if (value == 1)
266 Wtemp = SetBit(Rtemp, BitPos);
267 else
268 return -1;
269 }
270 WriteReg(BaseAddr, RegNum, Wtemp);
271 return 0;
272}
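/*
 * Usage sketch: the register-access macros below expand to this helper,
 * e.g. EnAllInt(iobase, ON) becomes WriteRegBit(iobase, H_CT, 7, 1).
 */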
273
274static __u8 CheckRegBit(unsigned int BaseAddr, unsigned char RegNum,
275 unsigned char BitPos)
276{
277 __u8 temp;
278
279 if (BitPos > 7)
280 return 0xff;
281 if ((RegNum < StartAddr) || (RegNum > EndAddr)) {
282// printf("what is the register %x!\n",RegNum);
283 }
284 temp = ReadReg(BaseAddr, RegNum);
285 return GetBit(temp, BitPos);
286}
287
288static void SetMaxRxPacketSize(__u16 iobase, __u16 size)
289{
290 __u16 low, high;
291 if ((size & 0xe000) == 0) {
292 low = size & 0x00ff;
293 high = (size & 0x1f00) >> 8;
294 WriteReg(iobase, I_CF_L_2, low);
295 WriteReg(iobase, I_CF_H_2, high);
296
297 }
298
299}
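/*
 * Example: the size must fit in 13 bits (bits 13-15 clear), so the maximum
 * is 0x0fff = 4095; that value is split as I_CF_L_2 = 0xff, I_CF_H_2 = 0x0f.
 */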
300
301//for both Rx and Tx
302
303static void SetFIFO(__u16 iobase, __u16 value)
304{
305 switch (value) {
306 case 128:
307 WriteRegBit(iobase, 0x11, 0, 0);
308 WriteRegBit(iobase, 0x11, 7, 1);
309 break;
310 case 64:
311 WriteRegBit(iobase, 0x11, 0, 0);
312 WriteRegBit(iobase, 0x11, 7, 0);
313 break;
314 case 32:
315 WriteRegBit(iobase, 0x11, 0, 1);
316 WriteRegBit(iobase, 0x11, 7, 0);
317 break;
318 default:
319 WriteRegBit(iobase, 0x11, 0, 0);
320 WriteRegBit(iobase, 0x11, 7, 0);
321 }
322
323}
324
325#define CRC16(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,7,val) //0 for 32 CRC
326/*
327#define SetVFIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,5,val)
328#define SetFIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,6,val)
329#define SetMIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,5,val)
330#define SetSIR(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,4,val)
331*/
332#define SIRFilter(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,3,val)
333#define Filter(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,2,val)
334#define InvertTX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,1,val)
335#define InvertRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_L_0,0,val)
336//****************************I_CF_H_0
337#define EnableTX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,4,val)
338#define EnableRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,3,val)
339#define EnableDMA(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,2,val)
340#define SIRRecvAny(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,1,val)
341#define DiableTrans(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_H_0,0,val)
342//***************************I_SIR_BOF,I_SIR_EOF
343#define SetSIRBOF(BaseAddr,val) WriteReg(BaseAddr,I_SIR_BOF,val)
344#define SetSIREOF(BaseAddr,val) WriteReg(BaseAddr,I_SIR_EOF,val)
345#define GetSIRBOF(BaseAddr) ReadReg(BaseAddr,I_SIR_BOF)
346#define GetSIREOF(BaseAddr) ReadReg(BaseAddr,I_SIR_EOF)
347//*******************I_ST_CT_0
348#define EnPhys(BaseAddr,val) WriteRegBit(BaseAddr,I_ST_CT_0,7,val)
349#define IsModeError(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,6) //RO
350#define IsVFIROn(BaseAddr) CheckRegBit(BaseAddr,0x14,0) //RO for VT1211 only
351#define IsFIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,5) //RO
352#define IsMIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,4) //RO
353#define IsSIROn(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,3) //RO
354#define IsEnableTX(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,2) //RO
355#define IsEnableRX(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,1) //RO
356#define Is16CRC(BaseAddr) CheckRegBit(BaseAddr,I_ST_CT_0,0) //RO
357//***************************I_CF_3
358#define DisableAdjacentPulseWidth(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,5,val) //1 disable
359#define DisablePulseWidthAdjust(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,4,val) //1 disable
360#define UseOneRX(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,1,val) //0 use two RX
361#define SlowIRRXLowActive(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_3,0,val) //0 show RX high=1 in SIR
362//***************************H_CT
363#define EnAllInt(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,7,val)
364#define TXStart(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,6,val)
365#define RXStart(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,5,val)
366#define ClearRXInt(BaseAddr,val) WriteRegBit(BaseAddr,H_CT,4,val) // 1 clear
367//*****************H_ST
368#define IsRXInt(BaseAddr) CheckRegBit(BaseAddr,H_ST,4)
369#define GetIntIndentify(BaseAddr) ((ReadReg(BaseAddr,H_ST)&0xf1) >>1)
370#define IsHostBusy(BaseAddr) CheckRegBit(BaseAddr,H_ST,0)
371#define GetHostStatus(BaseAddr) ReadReg(BaseAddr,H_ST) //RO
372//**************************M_CT
373#define EnTXDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,7,val)
374#define EnRXDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,6,val)
375#define SwapDMA(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,5,val)
376#define EnInternalLoop(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,4,val)
377#define EnExternalLoop(BaseAddr,val) WriteRegBit(BaseAddr,M_CT,3,val)
378//**************************TX_CT_1
379#define EnTXFIFOHalfLevelInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,4,val) //half empty int (1 half)
380#define EnTXFIFOUnderrunEOMInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,5,val)
381#define EnTXFIFOReadyInt(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_1,6,val) //int when it reaches its threshold (set by bit 4)
382//**************************TX_CT_2
383#define ForceUnderrun(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,7,val) // force an underrun int
384#define EnTXCRC(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,6,val) //1 for FIR,MIR...0 (not SIR)
385#define ForceBADCRC(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,5,val) //force a bad CRC
386#define SendSIP(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,4,val) //send an indication pulse to prevent SIR disturbance
387#define ClearEnTX(BaseAddr,val) WriteRegBit(BaseAddr,TX_CT_2,3,val) // opposite to EnTX
388//*****************TX_ST
389#define GetTXStatus(BaseAddr) ReadReg(BaseAddr,TX_ST) //RO
390//**************************RX_CT
391#define EnRXSpecInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,0,val)
392#define EnRXFIFOReadyInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,1,val) //enable int when it reaches its threshold (set by bit 7)
393#define EnRXFIFOHalfLevelInt(BaseAddr,val) WriteRegBit(BaseAddr,RX_CT,7,val) //enable int when (1) half full...or (0) just not full
394//*****************RX_ST
395#define GetRXStatus(BaseAddr) ReadReg(BaseAddr,RX_ST) //RO
396//***********************P_ADDR
397#define SetPacketAddr(BaseAddr,addr) WriteReg(BaseAddr,P_ADDR,addr)
398//***********************I_CF_4
399#define EnGPIOtoRX2(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,7,val)
400#define EnTimerInt(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,1,val)
401#define ClearTimerInt(BaseAddr,val) WriteRegBit(BaseAddr,I_CF_4,0,val)
402//***********************I_T_C_L
403#define WriteGIO(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_L,7,val)
404#define ReadGIO(BaseAddr) CheckRegBit(BaseAddr,I_T_C_L,7)
405#define ReadRX(BaseAddr) CheckRegBit(BaseAddr,I_T_C_L,3) //RO
406#define WriteTX(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_L,0,val)
407//***********************I_T_C_H
408#define EnRX2(BaseAddr,val) WriteRegBit(BaseAddr,I_T_C_H,7,val)
409#define ReadRX2(BaseAddr) CheckRegBit(BaseAddr,I_T_C_H,7)
410//**********************Version
411#define GetFIRVersion(BaseAddr) ReadReg(BaseAddr,VERSION)
412
413
414static void SetTimer(__u16 iobase, __u8 count)
415{
416 EnTimerInt(iobase, OFF);
417 WriteReg(iobase, TIMER, count);
418 EnTimerInt(iobase, ON);
419}
420
421
422static void SetSendByte(__u16 iobase, __u32 count)
423{
424 __u32 low, high;
425
426 if ((count & 0xf000) == 0) {
427 low = count & 0x00ff;
428 high = (count & 0x0f00) >> 8;
429 WriteReg(iobase, TX_C_L, low);
430 WriteReg(iobase, TX_C_H, high);
431 }
432}
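/*
 * Example: a count of 0x123 is programmed as TX_C_L = 0x23, TX_C_H = 0x01;
 * counts of 0x1000 or more do not fit in the 12-bit field and are silently
 * ignored.
 */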
433
434static void ResetChip(__u16 iobase, __u8 type)
435{
436 __u8 value;
437
438 value = (type + 2) << 4;
439 WriteReg(iobase, RESET, type);
440}
441
442static int CkRxRecv(__u16 iobase, struct via_ircc_cb *self)
443{
444 __u8 low, high;
445 __u16 wTmp = 0, wTmp1 = 0, wTmp_new = 0;
446
447 low = ReadReg(iobase, RX_C_L);
448 high = ReadReg(iobase, RX_C_H);
449 wTmp1 = high;
450 wTmp = (wTmp1 << 8) | low;
451 udelay(10);
452 low = ReadReg(iobase, RX_C_L);
453 high = ReadReg(iobase, RX_C_H);
454 wTmp1 = high;
455 wTmp_new = (wTmp1 << 8) | low;
456 if (wTmp_new != wTmp)
457 return 1;
458 else
459 return 0;
460
461}
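/*
 * CkRxRecv() samples the 16-bit receive counter (RX_C_H:RX_C_L) twice,
 * 10 us apart, and reports 1 if it changed, i.e. the receive DMA is still
 * moving data.
 */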
462
463static __u16 RxCurCount(__u16 iobase, struct via_ircc_cb * self)
464{
465 __u8 low, high;
466 __u16 wTmp = 0, wTmp1 = 0;
467
468 low = ReadReg(iobase, RX_P_L);
469 high = ReadReg(iobase, RX_P_H);
470 wTmp1 = high;
471 wTmp = (wTmp1 << 8) | low;
472 return wTmp;
473}
474
475/* This routine can only be used in receive_complete,
476 * since it updates the last count.
477 */
478
479static __u16 GetRecvByte(__u16 iobase, struct via_ircc_cb * self)
480{
481 __u8 low, high;
482 __u16 wTmp, wTmp1, ret;
483
484 low = ReadReg(iobase, RX_P_L);
485 high = ReadReg(iobase, RX_P_H);
486 wTmp1 = high;
487 wTmp = (wTmp1 << 8) | low;
488
489
490 if (wTmp >= self->RxLastCount)
491 ret = wTmp - self->RxLastCount;
492 else
493 ret = (0x8000 - self->RxLastCount) + wTmp;
494 self->RxLastCount = wTmp;
495
496/* RX_P is more actually the RX_C
497 low=ReadReg(iobase,RX_C_L);
498 high=ReadReg(iobase,RX_C_H);
499
500 if(!(high&0xe000)) {
501 temp=(high<<8)+low;
502 return temp;
503 }
504 else return 0;
505*/
506 return ret;
507}
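/*
 * Wraparound example for the computation above: the pointer wraps at
 * 0x8000, so if RxLastCount = 0x7ff0 and the pointer now reads 0x0010,
 * ret = (0x8000 - 0x7ff0) + 0x0010 = 0x20 bytes received.
 */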
508
509static void Sdelay(__u16 scale)
510{
511 __u8 bTmp;
512 int i, j;
513
514 for (j = 0; j < scale; j++) {
515 for (i = 0; i < 0x20; i++) {
516 bTmp = inb(0xeb);
517 outb(bTmp, 0xeb);
518 }
519 }
520}
521
522static void Tdelay(__u16 scale)
523{
524 __u8 bTmp;
525 int i, j;
526
527 for (j = 0; j < scale; j++) {
528 for (i = 0; i < 0x50; i++) {
529 bTmp = inb(0xeb);
530 outb(bTmp, 0xeb);
531 }
532 }
533}
534
535
536static void ActClk(__u16 iobase, __u8 value)
537{
538 __u8 bTmp;
539 bTmp = ReadReg(iobase, 0x34);
540 if (value)
541 WriteReg(iobase, 0x34, bTmp | Clk_bit);
542 else
543 WriteReg(iobase, 0x34, bTmp & ~Clk_bit);
544}
545
546static void ClkTx(__u16 iobase, __u8 Clk, __u8 Tx)
547{
548 __u8 bTmp;
549
550 bTmp = ReadReg(iobase, 0x34);
551 if (Clk == 0)
552 bTmp &= ~Clk_bit;
553 else {
554 if (Clk == 1)
555 bTmp |= Clk_bit;
556 }
557 WriteReg(iobase, 0x34, bTmp);
558 Sdelay(1);
559 if (Tx == 0)
560 bTmp &= ~Tx_bit;
561 else {
562 if (Tx == 1)
563 bTmp |= Tx_bit;
564 }
565 WriteReg(iobase, 0x34, bTmp);
566}
567
568static void Wr_Byte(__u16 iobase, __u8 data)
569{
570 __u8 bData = data;
571// __u8 btmp;
572 int i;
573
574 ClkTx(iobase, 0, 1);
575
576 Tdelay(2);
577 ActClk(iobase, 1);
578 Tdelay(1);
579
580 for (i = 0; i < 8; i++) { //LDN
581
582 if ((bData >> i) & 0x01) {
583 ClkTx(iobase, 0, 1); //bit data = 1;
584 } else {
585 ClkTx(iobase, 0, 0); //bit data = 1;
586 }
587 Tdelay(2);
588 Sdelay(1);
589 ActClk(iobase, 1); //clk hi
590 Tdelay(1);
591 }
592}
593
594static __u8 Rd_Indx(__u16 iobase, __u8 addr, __u8 index)
595{
596 __u8 data = 0, bTmp, data_bit;
597 int i;
598
599 bTmp = addr | (index << 1) | 0;
600 ClkTx(iobase, 0, 0);
601 Tdelay(2);
602 ActClk(iobase, 1);
603 udelay(1);
604 Wr_Byte(iobase, bTmp);
605 Sdelay(1);
606 ClkTx(iobase, 0, 0);
607 Tdelay(2);
608 for (i = 0; i < 10; i++) {
609 ActClk(iobase, 1);
610 Tdelay(1);
611 ActClk(iobase, 0);
612 Tdelay(1);
613 ClkTx(iobase, 0, 1);
614 Tdelay(1);
615 bTmp = ReadReg(iobase, 0x34);
616 if (!(bTmp & Rd_Valid))
617 break;
618 }
619 if (!(bTmp & Rd_Valid)) {
620 for (i = 0; i < 8; i++) {
621 ActClk(iobase, 1);
622 Tdelay(1);
623 ActClk(iobase, 0);
624 bTmp = ReadReg(iobase, 0x34);
625 data_bit = 1 << i;
626 if (bTmp & RxBit)
627 data |= data_bit;
628 else
629 data &= ~data_bit;
630 Tdelay(2);
631 }
632 } else {
633 for (i = 0; i < 2; i++) {
634 ActClk(iobase, 1);
635 Tdelay(1);
636 ActClk(iobase, 0);
637 Tdelay(2);
638 }
639 bTmp = ReadReg(iobase, 0x34);
640 }
641 for (i = 0; i < 1; i++) {
642 ActClk(iobase, 1);
643 Tdelay(1);
644 ActClk(iobase, 0);
645 Tdelay(2);
646 }
647 ClkTx(iobase, 0, 0);
648 Tdelay(1);
649 for (i = 0; i < 3; i++) {
650 ActClk(iobase, 1);
651 Tdelay(1);
652 ActClk(iobase, 0);
653 Tdelay(2);
654 }
655 return data;
656}
657
658static void Wr_Indx(__u16 iobase, __u8 addr, __u8 index, __u8 data)
659{
660 int i;
661 __u8 bTmp;
662
663 ClkTx(iobase, 0, 0);
664 udelay(2);
665 ActClk(iobase, 1);
666 udelay(1);
667 bTmp = addr | (index << 1) | 1;
668 Wr_Byte(iobase, bTmp);
669 Wr_Byte(iobase, data);
670 for (i = 0; i < 2; i++) {
671 ClkTx(iobase, 0, 0);
672 Tdelay(2);
673 ActClk(iobase, 1);
674 Tdelay(1);
675 }
676 ActClk(iobase, 0);
677}
678
679static void ResetDongle(__u16 iobase)
680{
681 int i;
682 ClkTx(iobase, 0, 0);
683 Tdelay(1);
684 for (i = 0; i < 30; i++) {
685 ActClk(iobase, 1);
686 Tdelay(1);
687 ActClk(iobase, 0);
688 Tdelay(1);
689 }
690 ActClk(iobase, 0);
691}
692
693static void SetSITmode(__u16 iobase)
694{
695
696 __u8 bTmp;
697
698 bTmp = ReadLPCReg(0x28);
699 WriteLPCReg(0x28, bTmp | 0x10); //select ITMOFF
700 bTmp = ReadReg(iobase, 0x35);
701 WriteReg(iobase, 0x35, bTmp | 0x40); // Driver ITMOFF
702 WriteReg(iobase, 0x28, bTmp | 0x80); // enable All interrupt
703}
704
705static void SI_SetMode(__u16 iobase, int mode)
706{
707 //__u32 dTmp;
708 __u8 bTmp;
709
710 WriteLPCReg(0x28, 0x70); // S/W Reset
711 SetSITmode(iobase);
712 ResetDongle(iobase);
713 udelay(10);
714 Wr_Indx(iobase, 0x40, 0x0, 0x17); //RX ,APEN enable,Normal power
715 Wr_Indx(iobase, 0x40, 0x1, mode); //Set Mode
716 Wr_Indx(iobase, 0x40, 0x2, 0xff); //Set power to FIR VFIR > 1m
717 bTmp = Rd_Indx(iobase, 0x40, 1);
718}
719
720static void InitCard(__u16 iobase)
721{
722 ResetChip(iobase, 5);
723 WriteReg(iobase, I_ST_CT_0, 0x00); // open CHIP on
724 SetSIRBOF(iobase, 0xc0); // hardware default value
725 SetSIREOF(iobase, 0xc1);
726}
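/* 0xc0/0xc1 written above are the usual IrLAP SIR BOF/EOF framing bytes. */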
727
728static void CommonInit(__u16 iobase)
729{
730// EnTXCRC(iobase,0);
731 SwapDMA(iobase, OFF);
732 SetMaxRxPacketSize(iobase, 0x0fff); //set to max:4095
733 EnRXFIFOReadyInt(iobase, OFF);
734 EnRXFIFOHalfLevelInt(iobase, OFF);
735 EnTXFIFOHalfLevelInt(iobase, OFF);
736 EnTXFIFOUnderrunEOMInt(iobase, ON);
737// EnTXFIFOReadyInt(iobase,ON);
738 InvertTX(iobase, OFF);
739 InvertRX(iobase, OFF);
740// WriteLPCReg(0xF0,0); //(if VT1211 then do this)
741 if (IsSIROn(iobase)) {
742 SIRFilter(iobase, ON);
743 SIRRecvAny(iobase, ON);
744 } else {
745 SIRFilter(iobase, OFF);
746 SIRRecvAny(iobase, OFF);
747 }
748 EnRXSpecInt(iobase, ON);
749 WriteReg(iobase, I_ST_CT_0, 0x80);
750 EnableDMA(iobase, ON);
751}
752
753static void SetBaudRate(__u16 iobase, __u32 rate)
754{
755 __u8 value = 11, temp;
756
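	/*
	 * The SIR divisor programmed below follows (115200 / rate) - 1:
	 * e.g. 2400 baud -> 47, 9600 baud -> 11, 115200 baud -> 0.
	 */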
757 if (IsSIROn(iobase)) {
758 switch (rate) {
759 case (__u32) (2400L):
760 value = 47;
761 break;
762 case (__u32) (9600L):
763 value = 11;
764 break;
765 case (__u32) (19200L):
766 value = 5;
767 break;
768 case (__u32) (38400L):
769 value = 2;
770 break;
771 case (__u32) (57600L):
772 value = 1;
773 break;
774 case (__u32) (115200L):
775 value = 0;
776 break;
777 default:
778 break;
  779		}
780 } else if (IsMIROn(iobase)) {
  781		value = 0;	// hardware fixes the rate at 1.152 Mb/s in MIR mode
782 } else if (IsFIROn(iobase)) {
  783		value = 0;	// hardware fixes the rate at 4 Mb/s in FIR mode
784 }
785 temp = (ReadReg(iobase, I_CF_H_1) & 0x03);
786 temp |= value << 2;
787 WriteReg(iobase, I_CF_H_1, temp);
788}
789
790static void SetPulseWidth(__u16 iobase, __u8 width)
791{
792 __u8 temp, temp1, temp2;
793
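	/*
	 * The 5-bit pulse width is split across two registers:
	 * width[2:0] goes to I_CF_L_1 bits 7:5, width[4:3] to
	 * I_CF_H_1 bits 1:0.
	 */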
794 temp = (ReadReg(iobase, I_CF_L_1) & 0x1f);
795 temp1 = (ReadReg(iobase, I_CF_H_1) & 0xfc);
796 temp2 = (width & 0x07) << 5;
797 temp |= temp2;
798 temp2 = (width & 0x18) >> 3;
799 temp1 |= temp2;
800 WriteReg(iobase, I_CF_L_1, temp);
801 WriteReg(iobase, I_CF_H_1, temp1);
802}
803
804static void SetSendPreambleCount(__u16 iobase, __u8 count)
805{
806 __u8 temp;
807
808 temp = ReadReg(iobase, I_CF_L_1) & 0xe0;
809 temp |= count;
810 WriteReg(iobase, I_CF_L_1, temp);
811
812}
813
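/*
 * Mode-select helpers: each clears the competing mode bits first
 * (VFIR lives in I_CF_H_0 bit 5, FIR/MIR/SIR in I_CF_L_0 bits 6:4)
 * before writing the requested mode bit.
 */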
814static void SetVFIR(__u16 BaseAddr, __u8 val)
815{
816 __u8 tmp;
817
818 tmp = ReadReg(BaseAddr, I_CF_L_0);
819 WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
820 WriteRegBit(BaseAddr, I_CF_H_0, 5, val);
821}
822
823static void SetFIR(__u16 BaseAddr, __u8 val)
824{
825 __u8 tmp;
826
827 WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
828 tmp = ReadReg(BaseAddr, I_CF_L_0);
829 WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
830 WriteRegBit(BaseAddr, I_CF_L_0, 6, val);
831}
832
833static void SetMIR(__u16 BaseAddr, __u8 val)
834{
835 __u8 tmp;
836
837 WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
838 tmp = ReadReg(BaseAddr, I_CF_L_0);
839 WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
840 WriteRegBit(BaseAddr, I_CF_L_0, 5, val);
841}
842
843static void SetSIR(__u16 BaseAddr, __u8 val)
844{
845 __u8 tmp;
846
847 WriteRegBit(BaseAddr, I_CF_H_0, 5, 0);
848 tmp = ReadReg(BaseAddr, I_CF_L_0);
849 WriteReg(BaseAddr, I_CF_L_0, tmp & 0x8f);
850 WriteRegBit(BaseAddr, I_CF_L_0, 4, val);
851}
852
853#endif /* via_IRCC_H */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
new file mode 100644
index 000000000000..35fad8171a01
--- /dev/null
+++ b/drivers/net/irda/vlsi_ir.c
@@ -0,0 +1,1912 @@
1/*********************************************************************
2 *
3 * vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux
4 *
5 * Copyright (c) 2001-2003 Martin Diehl
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
20 * MA 02111-1307 USA
21 *
22 ********************************************************************/
23
24#include <linux/config.h>
25#include <linux/module.h>
26
27#define DRIVER_NAME "vlsi_ir"
28#define DRIVER_VERSION "v0.5"
29#define DRIVER_DESCRIPTION "IrDA SIR/MIR/FIR driver for VLSI 82C147"
30#define DRIVER_AUTHOR "Martin Diehl <info@mdiehl.de>"
31
32MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
33MODULE_AUTHOR(DRIVER_AUTHOR);
34MODULE_LICENSE("GPL");
35
36/********************************************************/
37
38#include <linux/kernel.h>
39#include <linux/init.h>
40#include <linux/pci.h>
41#include <linux/slab.h>
42#include <linux/netdevice.h>
43#include <linux/skbuff.h>
44#include <linux/delay.h>
45#include <linux/time.h>
46#include <linux/proc_fs.h>
47#include <linux/seq_file.h>
48#include <linux/smp_lock.h>
49#include <asm/uaccess.h>
50#include <asm/byteorder.h>
51
52#include <net/irda/irda.h>
53#include <net/irda/irda_device.h>
54#include <net/irda/wrapper.h>
55#include <net/irda/crc.h>
56
57#include "vlsi_ir.h"
58
59/********************************************************/
60
61static /* const */ char drivername[] = DRIVER_NAME;
62
63static struct pci_device_id vlsi_irda_table [] = {
64 {
65 .class = PCI_CLASS_WIRELESS_IRDA << 8,
66 .class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
67 .vendor = PCI_VENDOR_ID_VLSI,
68 .device = PCI_DEVICE_ID_VLSI_82C147,
69 .subvendor = PCI_ANY_ID,
70 .subdevice = PCI_ANY_ID,
71 },
72 { /* all zeroes */ }
73};
74
75MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
76
77/********************************************************/
78
79/* clksrc: which clock source to be used
80 * 0: auto - try PLL, fallback to 40MHz XCLK
81 * 1: on-chip 48MHz PLL
82 * 2: external 48MHz XCLK
83 * 3: external 40MHz XCLK (HP OB-800)
84 */
85
86static int clksrc = 0; /* default is 0(auto) */
87module_param(clksrc, int, 0);
88MODULE_PARM_DESC(clksrc, "clock input source selection");
89
90/* ringsize: size of the tx and rx descriptor rings
91 * independent for tx and rx
92 * specify as ringsize=tx[,rx]
93 * allowed values: 4, 8, 16, 32, 64
94 * Due to the IrDA 1.x max. allowed window size=7,
95 * there should be no gain when using rings larger than 8
96 */
97
98static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */
99module_param_array(ringsize, int, NULL, 0);
100MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
101
102/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
103 * 0: very short, 1.5us (exception: 6us at 2.4 kbaud)
104 * 1: nominal 3/16 bittime width
 105 * note: IrDA compliant peer devices should be happy regardless
 106 * of which one is used. Primary goal is to save some power
107 * on the sender's side - at 9.6kbaud for example the short
108 * pulse width saves more than 90% of the transmitted IR power.
109 */
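/* illustration: at 9600 baud the bit time is ~104 usec, so the nominal
 * 3/16 pulse lasts ~19.5 usec versus 1.5 usec for the short pulse -
 * roughly a 92% reduction, consistent with the ">90%" figure above.
 */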
110
111static int sirpulse = 1; /* default is 3/16 bittime */
112module_param(sirpulse, int, 0);
113MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
114
115/* qos_mtt_bits: encoded min-turn-time value we require the peer device
116 * to use before transmitting to us. "Type 1" (per-station)
117 * bitfield according to IrLAP definition (section 6.6.8)
118 * Don't know which transceiver is used by my OB800 - the
 119 *		pretty common HP HDLS-1100 requires 1 msec - so let's use this.
120 */
121
122static int qos_mtt_bits = 0x07; /* default is 1 ms or more */
123module_param(qos_mtt_bits, int, 0);
124MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
125
126/********************************************************/
127
128static void vlsi_reg_debug(unsigned iobase, const char *s)
129{
130 int i;
131
132 printk(KERN_DEBUG "%s: ", s);
133 for (i = 0; i < 0x20; i++)
134 printk("%02x", (unsigned)inb((iobase+i)));
135 printk("\n");
136}
137
138static void vlsi_ring_debug(struct vlsi_ring *r)
139{
140 struct ring_descr *rd;
141 unsigned i;
142
143 printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
144 __FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
145 printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__,
146 atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
147 for (i = 0; i < r->size; i++) {
148 rd = &r->rd[i];
149 printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i);
150 printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
151 printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
152 __FUNCTION__, (unsigned) rd_get_status(rd),
153 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
154 }
155}
156
157/********************************************************/
158
159/* needed regardless of CONFIG_PROC_FS */
160static struct proc_dir_entry *vlsi_proc_root = NULL;
161
162#ifdef CONFIG_PROC_FS
163
164static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
165{
166 unsigned iobase = pci_resource_start(pdev, 0);
167 unsigned i;
168
169 seq_printf(seq, "\n%s (vid/did: %04x/%04x)\n",
170 PCIDEV_NAME(pdev), (int)pdev->vendor, (int)pdev->device);
171 seq_printf(seq, "pci-power-state: %u\n", (unsigned) pdev->current_state);
172 seq_printf(seq, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
173 pdev->irq, (unsigned)pci_resource_start(pdev, 0), (unsigned long long)pdev->dma_mask);
174 seq_printf(seq, "hw registers: ");
175 for (i = 0; i < 0x20; i++)
176 seq_printf(seq, "%02x", (unsigned)inb((iobase+i)));
177 seq_printf(seq, "\n");
178}
179
180static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
181{
182 vlsi_irda_dev_t *idev = ndev->priv;
183 u8 byte;
184 u16 word;
185 unsigned delta1, delta2;
186 struct timeval now;
187 unsigned iobase = ndev->base_addr;
188
189 seq_printf(seq, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
190 netif_device_present(ndev) ? "attached" : "detached",
191 netif_running(ndev) ? "running" : "not running",
192 netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
193 netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
194
195 if (!netif_running(ndev))
196 return;
197
198 seq_printf(seq, "\nhw-state:\n");
199 pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
200 seq_printf(seq, "IRMISC:%s%s%s uart%s",
201 (byte&IRMISC_IRRAIL) ? " irrail" : "",
202 (byte&IRMISC_IRPD) ? " irpd" : "",
203 (byte&IRMISC_UARTTST) ? " uarttest" : "",
204 (byte&IRMISC_UARTEN) ? "@" : " disabled\n");
205 if (byte&IRMISC_UARTEN) {
206 seq_printf(seq, "0x%s\n",
207 (byte&2) ? ((byte&1) ? "3e8" : "2e8")
208 : ((byte&1) ? "3f8" : "2f8"));
209 }
210 pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
211 seq_printf(seq, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
212 (byte&CLKCTL_PD_INV) ? "powered" : "down",
213 (byte&CLKCTL_LOCK) ? " locked" : "",
214 (byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
215 (byte&CLKCTL_CLKSTP) ? "stopped" : "running",
216 (byte&CLKCTL_WAKE) ? "enabled" : "disabled");
217 pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
218 seq_printf(seq, "MSTRPAGE: 0x%02x\n", (unsigned)byte);
219
220 byte = inb(iobase+VLSI_PIO_IRINTR);
221 seq_printf(seq, "IRINTR:%s%s%s%s%s%s%s%s\n",
222 (byte&IRINTR_ACTEN) ? " ACTEN" : "",
223 (byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
224 (byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
225 (byte&IRINTR_OE_EN) ? " OE_EN" : "",
226 (byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
227 (byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
228 (byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
229 (byte&IRINTR_OE_INT) ? " OE_INT" : "");
230 word = inw(iobase+VLSI_PIO_RINGPTR);
231 seq_printf(seq, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
232 word = inw(iobase+VLSI_PIO_RINGBASE);
233 seq_printf(seq, "RINGBASE: busmap=0x%08x\n",
234 ((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
235 word = inw(iobase+VLSI_PIO_RINGSIZE);
236 seq_printf(seq, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
237 RINGSIZE_TO_TXSIZE(word));
238
239 word = inw(iobase+VLSI_PIO_IRCFG);
240 seq_printf(seq, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
241 (word&IRCFG_LOOP) ? " LOOP" : "",
242 (word&IRCFG_ENTX) ? " ENTX" : "",
243 (word&IRCFG_ENRX) ? " ENRX" : "",
244 (word&IRCFG_MSTR) ? " MSTR" : "",
245 (word&IRCFG_RXANY) ? " RXANY" : "",
246 (word&IRCFG_CRC16) ? " CRC16" : "",
247 (word&IRCFG_FIR) ? " FIR" : "",
248 (word&IRCFG_MIR) ? " MIR" : "",
249 (word&IRCFG_SIR) ? " SIR" : "",
250 (word&IRCFG_SIRFILT) ? " SIRFILT" : "",
251 (word&IRCFG_SIRTEST) ? " SIRTEST" : "",
252 (word&IRCFG_TXPOL) ? " TXPOL" : "",
253 (word&IRCFG_RXPOL) ? " RXPOL" : "");
254 word = inw(iobase+VLSI_PIO_IRENABLE);
255 seq_printf(seq, "IRENABLE:%s%s%s%s%s%s%s%s\n",
256 (word&IRENABLE_PHYANDCLOCK) ? " PHYANDCLOCK" : "",
257 (word&IRENABLE_CFGER) ? " CFGERR" : "",
258 (word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
259 (word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
260 (word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
261 (word&IRENABLE_ENTXST) ? " ENTXST" : "",
262 (word&IRENABLE_ENRXST) ? " ENRXST" : "",
263 (word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
264 word = inw(iobase+VLSI_PIO_PHYCTL);
265 seq_printf(seq, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
266 (unsigned)PHYCTL_TO_BAUD(word),
267 (unsigned)PHYCTL_TO_PLSWID(word),
268 (unsigned)PHYCTL_TO_PREAMB(word));
269 word = inw(iobase+VLSI_PIO_NPHYCTL);
270 seq_printf(seq, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
271 (unsigned)PHYCTL_TO_BAUD(word),
272 (unsigned)PHYCTL_TO_PLSWID(word),
273 (unsigned)PHYCTL_TO_PREAMB(word));
274 word = inw(iobase+VLSI_PIO_MAXPKT);
275 seq_printf(seq, "MAXPKT: max. rx packet size = %u\n", word);
276 word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
277 seq_printf(seq, "RCVBCNT: rx-fifo filling level = %u\n", word);
278
279 seq_printf(seq, "\nsw-state:\n");
280 seq_printf(seq, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
281 (idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
282 do_gettimeofday(&now);
283 if (now.tv_usec >= idev->last_rx.tv_usec) {
284 delta2 = now.tv_usec - idev->last_rx.tv_usec;
285 delta1 = 0;
286 }
287 else {
288 delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
289 delta1 = 1;
290 }
291 seq_printf(seq, "last rx: %lu.%06u sec\n",
292 now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);
293
294 seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
295 idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors,
296 idev->stats.rx_dropped);
297 seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
298 idev->stats.rx_over_errors, idev->stats.rx_length_errors,
299 idev->stats.rx_frame_errors, idev->stats.rx_crc_errors);
300 seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
301 idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors,
302 idev->stats.tx_dropped, idev->stats.tx_fifo_errors);
303
304}
305
306static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
307{
308 struct ring_descr *rd;
309 unsigned i, j;
310 int h, t;
311
312 seq_printf(seq, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
313 r->size, r->mask, r->len, r->dir, r->rd[0].hw);
314 h = atomic_read(&r->head) & r->mask;
315 t = atomic_read(&r->tail) & r->mask;
316 seq_printf(seq, "head = %d / tail = %d ", h, t);
317 if (h == t)
318 seq_printf(seq, "(empty)\n");
319 else {
320 if (((t+1)&r->mask) == h)
321 seq_printf(seq, "(full)\n");
322 else
323 seq_printf(seq, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
324 rd = &r->rd[h];
325 j = (unsigned) rd_get_count(rd);
326 seq_printf(seq, "current: rd = %d / status = %02x / len = %u\n",
327 h, (unsigned)rd_get_status(rd), j);
328 if (j > 0) {
329 seq_printf(seq, " data:");
330 if (j > 20)
331 j = 20;
332 for (i = 0; i < j; i++)
333 seq_printf(seq, " %02x", (unsigned)((unsigned char *)rd->buf)[i]);
334 seq_printf(seq, "\n");
335 }
336 }
337 for (i = 0; i < r->size; i++) {
338 rd = &r->rd[i];
339 seq_printf(seq, "> ring descr %u: ", i);
340 seq_printf(seq, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
341 seq_printf(seq, " hw: status=%02x count=%u busaddr=0x%08x\n",
342 (unsigned) rd_get_status(rd),
343 (unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
344 }
345}
346
347static int vlsi_seq_show(struct seq_file *seq, void *v)
348{
349 struct net_device *ndev = seq->private;
350 vlsi_irda_dev_t *idev = ndev->priv;
351 unsigned long flags;
352
353 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
354 seq_printf(seq, "clksrc: %s\n",
355 (clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
356 : ((clksrc==1)?"48MHz PLL":"autodetect"));
357 seq_printf(seq, "ringsize: tx=%d / rx=%d\n",
358 ringsize[0], ringsize[1]);
359 seq_printf(seq, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
360 seq_printf(seq, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);
361
362 spin_lock_irqsave(&idev->lock, flags);
363 if (idev->pdev != NULL) {
364 vlsi_proc_pdev(seq, idev->pdev);
365
366 if (idev->pdev->current_state == 0)
367 vlsi_proc_ndev(seq, ndev);
368 else
369 seq_printf(seq, "\nPCI controller down - resume_ok = %d\n",
370 idev->resume_ok);
371 if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
372 seq_printf(seq, "\n--------- RX ring -----------\n\n");
373 vlsi_proc_ring(seq, idev->rx_ring);
374 seq_printf(seq, "\n--------- TX ring -----------\n\n");
375 vlsi_proc_ring(seq, idev->tx_ring);
376 }
377 }
378 seq_printf(seq, "\n");
379 spin_unlock_irqrestore(&idev->lock, flags);
380
381 return 0;
382}
383
384static int vlsi_seq_open(struct inode *inode, struct file *file)
385{
386 return single_open(file, vlsi_seq_show, PDE(inode)->data);
387}
388
389static struct file_operations vlsi_proc_fops = {
390 .owner = THIS_MODULE,
391 .open = vlsi_seq_open,
392 .read = seq_read,
393 .llseek = seq_lseek,
394 .release = single_release,
395};
396
397#define VLSI_PROC_FOPS (&vlsi_proc_fops)
398
399#else /* CONFIG_PROC_FS */
400#define VLSI_PROC_FOPS NULL
401#endif
402
403/********************************************************/
404
405static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
406 unsigned size, unsigned len, int dir)
407{
408 struct vlsi_ring *r;
409 struct ring_descr *rd;
410 unsigned i, j;
411 dma_addr_t busaddr;
412
413 if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */
414 return NULL;
415
416 r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
417 if (!r)
418 return NULL;
419 memset(r, 0, sizeof(*r));
420
421 r->pdev = pdev;
422 r->dir = dir;
423 r->len = len;
424 r->rd = (struct ring_descr *)(r+1);
425 r->mask = size - 1;
426 r->size = size;
427 atomic_set(&r->head, 0);
428 atomic_set(&r->tail, 0);
429
430 for (i = 0; i < size; i++) {
431 rd = r->rd + i;
432 memset(rd, 0, sizeof(*rd));
433 rd->hw = hwmap + i;
434 rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
435 if (rd->buf == NULL
436 || !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
437 if (rd->buf) {
 438				IRDA_ERROR("%s: failed to create PCI-MAP for %p\n",
439 __FUNCTION__, rd->buf);
440 kfree(rd->buf);
441 rd->buf = NULL;
442 }
443 for (j = 0; j < i; j++) {
444 rd = r->rd + j;
445 busaddr = rd_get_addr(rd);
446 rd_set_addr_status(rd, 0, 0);
447 if (busaddr)
448 pci_unmap_single(pdev, busaddr, len, dir);
449 kfree(rd->buf);
450 rd->buf = NULL;
451 }
452 kfree(r);
453 return NULL;
454 }
455 rd_set_addr_status(rd, busaddr, 0);
456 /* initially, the dma buffer is owned by the CPU */
457 rd->skb = NULL;
458 }
459 return r;
460}
461
462static int vlsi_free_ring(struct vlsi_ring *r)
463{
464 struct ring_descr *rd;
465 unsigned i;
466 dma_addr_t busaddr;
467
468 for (i = 0; i < r->size; i++) {
469 rd = r->rd + i;
470 if (rd->skb)
471 dev_kfree_skb_any(rd->skb);
472 busaddr = rd_get_addr(rd);
473 rd_set_addr_status(rd, 0, 0);
474 if (busaddr)
475 pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
476 if (rd->buf)
477 kfree(rd->buf);
478 }
479 kfree(r);
480 return 0;
481}
482
483static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
484{
485 char *ringarea;
486 struct ring_descr_hw *hwmap;
487
488 idev->virtaddr = NULL;
489 idev->busaddr = 0;
490
491 ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
492 if (!ringarea) {
493 IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
494 __FUNCTION__);
495 goto out;
496 }
497 memset(ringarea, 0, HW_RING_AREA_SIZE);
498
499 hwmap = (struct ring_descr_hw *)ringarea;
500 idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
501 XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
502 if (idev->rx_ring == NULL)
503 goto out_unmap;
504
505 hwmap += MAX_RING_DESCR;
506 idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
507 XFER_BUF_SIZE, PCI_DMA_TODEVICE);
508 if (idev->tx_ring == NULL)
509 goto out_free_rx;
510
511 idev->virtaddr = ringarea;
512 return 0;
513
514out_free_rx:
515 vlsi_free_ring(idev->rx_ring);
516out_unmap:
517 idev->rx_ring = idev->tx_ring = NULL;
518 pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
519 idev->busaddr = 0;
520out:
521 return -ENOMEM;
522}
523
524static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
525{
526 vlsi_free_ring(idev->rx_ring);
527 vlsi_free_ring(idev->tx_ring);
528 idev->rx_ring = idev->tx_ring = NULL;
529
530 if (idev->busaddr)
531 pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
532
533 idev->virtaddr = NULL;
534 idev->busaddr = 0;
535
536 return 0;
537}
538
539/********************************************************/
540
541static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
542{
543 u16 status;
544 int crclen, len = 0;
545 struct sk_buff *skb;
546 int ret = 0;
547 struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
548 vlsi_irda_dev_t *idev = ndev->priv;
549
550 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
551 /* dma buffer now owned by the CPU */
552 status = rd_get_status(rd);
553 if (status & RD_RX_ERROR) {
554 if (status & RD_RX_OVER)
555 ret |= VLSI_RX_OVER;
556 if (status & RD_RX_LENGTH)
557 ret |= VLSI_RX_LENGTH;
558 if (status & RD_RX_PHYERR)
559 ret |= VLSI_RX_FRAME;
560 if (status & RD_RX_CRCERR)
561 ret |= VLSI_RX_CRC;
562 goto done;
563 }
564
565 len = rd_get_count(rd);
566 crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
567 len -= crclen; /* remove trailing CRC */
568 if (len <= 0) {
569 IRDA_DEBUG(0, "%s: strange frame (len=%d)\n", __FUNCTION__, len);
570 ret |= VLSI_RX_DROP;
571 goto done;
572 }
573
574 if (idev->mode == IFF_SIR) { /* hw checks CRC in MIR, FIR mode */
575
576 /* rd->buf is a streaming PCI_DMA_FROMDEVICE map. Doing the
 577		 * endian-adjustment in place there will dirty a cache line
 578		 * which belongs to the map, so we must be sure it gets
 579		 * flushed before giving the buffer back to the hardware.
 580		 * vlsi_fill_rx() will do this anyway - but here we rely on it.
581 */
582 le16_to_cpus(rd->buf+len);
583 if (irda_calc_crc16(INIT_FCS,rd->buf,len+crclen) != GOOD_FCS) {
584 IRDA_DEBUG(0, "%s: crc error\n", __FUNCTION__);
585 ret |= VLSI_RX_CRC;
586 goto done;
587 }
588 }
589
590 if (!rd->skb) {
591 IRDA_WARNING("%s: rx packet lost\n", __FUNCTION__);
592 ret |= VLSI_RX_DROP;
593 goto done;
594 }
595
596 skb = rd->skb;
597 rd->skb = NULL;
598 skb->dev = ndev;
599 memcpy(skb_put(skb,len), rd->buf, len);
600 skb->mac.raw = skb->data;
601 if (in_interrupt())
602 netif_rx(skb);
603 else
604 netif_rx_ni(skb);
605 ndev->last_rx = jiffies;
606
607done:
608 rd_set_status(rd, 0);
609 rd_set_count(rd, 0);
610 /* buffer still owned by CPU */
611
612 return (ret) ? -ret : len;
613}
614
615static void vlsi_fill_rx(struct vlsi_ring *r)
616{
617 struct ring_descr *rd;
618
619 for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
620 if (rd_is_active(rd)) {
621 IRDA_WARNING("%s: driver bug: rx descr race with hw\n",
622 __FUNCTION__);
623 vlsi_ring_debug(r);
624 break;
625 }
626 if (!rd->skb) {
627 rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
628 if (rd->skb) {
629 skb_reserve(rd->skb,1);
630 rd->skb->protocol = htons(ETH_P_IRDA);
631 }
632 else
633 break; /* probably not worth logging? */
634 }
635 /* give dma buffer back to busmaster */
636 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
637 rd_activate(rd);
638 }
639}
640
641static void vlsi_rx_interrupt(struct net_device *ndev)
642{
643 vlsi_irda_dev_t *idev = ndev->priv;
644 struct vlsi_ring *r = idev->rx_ring;
645 struct ring_descr *rd;
646 int ret;
647
648 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
649
650 if (rd_is_active(rd))
651 break;
652
653 ret = vlsi_process_rx(r, rd);
654
655 if (ret < 0) {
656 ret = -ret;
657 idev->stats.rx_errors++;
658 if (ret & VLSI_RX_DROP)
659 idev->stats.rx_dropped++;
660 if (ret & VLSI_RX_OVER)
661 idev->stats.rx_over_errors++;
662 if (ret & VLSI_RX_LENGTH)
663 idev->stats.rx_length_errors++;
664 if (ret & VLSI_RX_FRAME)
665 idev->stats.rx_frame_errors++;
666 if (ret & VLSI_RX_CRC)
667 idev->stats.rx_crc_errors++;
668 }
669 else if (ret > 0) {
670 idev->stats.rx_packets++;
671 idev->stats.rx_bytes += ret;
672 }
673 }
674
675 do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
676
677 vlsi_fill_rx(r);
678
679 if (ring_first(r) == NULL) {
680 /* we are in big trouble, if this should ever happen */
681 IRDA_ERROR("%s: rx ring exhausted!\n", __FUNCTION__);
682 vlsi_ring_debug(r);
683 }
684 else
685 outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
686}
687
688/* caller must have stopped the controller from busmastering */
689
690static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
691{
692 struct vlsi_ring *r = idev->rx_ring;
693 struct ring_descr *rd;
694 int ret;
695
696 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
697
698 ret = 0;
699 if (rd_is_active(rd)) {
700 rd_set_status(rd, 0);
701 if (rd_get_count(rd)) {
702 IRDA_DEBUG(0, "%s - dropping rx packet\n", __FUNCTION__);
703 ret = -VLSI_RX_DROP;
704 }
705 rd_set_count(rd, 0);
706 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
707 if (rd->skb) {
708 dev_kfree_skb_any(rd->skb);
709 rd->skb = NULL;
710 }
711 }
712 else
713 ret = vlsi_process_rx(r, rd);
714
715 if (ret < 0) {
716 ret = -ret;
717 idev->stats.rx_errors++;
718 if (ret & VLSI_RX_DROP)
719 idev->stats.rx_dropped++;
720 if (ret & VLSI_RX_OVER)
721 idev->stats.rx_over_errors++;
722 if (ret & VLSI_RX_LENGTH)
723 idev->stats.rx_length_errors++;
724 if (ret & VLSI_RX_FRAME)
725 idev->stats.rx_frame_errors++;
726 if (ret & VLSI_RX_CRC)
727 idev->stats.rx_crc_errors++;
728 }
729 else if (ret > 0) {
730 idev->stats.rx_packets++;
731 idev->stats.rx_bytes += ret;
732 }
733 }
734}
735
736/********************************************************/
737
738static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
739{
740 u16 status;
741 int len;
742 int ret;
743
744 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
745 /* dma buffer now owned by the CPU */
746 status = rd_get_status(rd);
747 if (status & RD_TX_UNDRN)
748 ret = VLSI_TX_FIFO;
749 else
750 ret = 0;
751 rd_set_status(rd, 0);
752
753 if (rd->skb) {
754 len = rd->skb->len;
755 dev_kfree_skb_any(rd->skb);
756 rd->skb = NULL;
757 }
758 else /* tx-skb already freed? - should never happen */
759 len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */
760
761 rd_set_count(rd, 0);
762 /* dma buffer still owned by the CPU */
763
764 return (ret) ? -ret : len;
765}
766
767static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
768{
769 u16 nphyctl;
770 u16 config;
771 unsigned mode;
772 int ret;
773 int baudrate;
774 int fifocnt;
775
776 baudrate = idev->new_baud;
777 IRDA_DEBUG(2, "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);
778 if (baudrate == 4000000) {
779 mode = IFF_FIR;
780 config = IRCFG_FIR;
781 nphyctl = PHYCTL_FIR;
782 }
783 else if (baudrate == 1152000) {
784 mode = IFF_MIR;
785 config = IRCFG_MIR | IRCFG_CRC16;
786 nphyctl = PHYCTL_MIR(clksrc==3);
787 }
788 else {
789 mode = IFF_SIR;
790 config = IRCFG_SIR | IRCFG_SIRFILT | IRCFG_RXANY;
791 switch(baudrate) {
792 default:
793 IRDA_WARNING("%s: undefined baudrate %d - fallback to 9600!\n",
794 __FUNCTION__, baudrate);
795 baudrate = 9600;
796 /* fallthru */
797 case 2400:
798 case 9600:
799 case 19200:
800 case 38400:
801 case 57600:
802 case 115200:
803 nphyctl = PHYCTL_SIR(baudrate,sirpulse,clksrc==3);
804 break;
805 }
806 }
807 config |= IRCFG_MSTR | IRCFG_ENRX;
808
809 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
810 if (fifocnt != 0) {
811 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
812 }
813
814 outw(0, iobase+VLSI_PIO_IRENABLE);
815 outw(config, iobase+VLSI_PIO_IRCFG);
816 outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
817 wmb();
818 outw(IRENABLE_PHYANDCLOCK, iobase+VLSI_PIO_IRENABLE);
819 mb();
820
821 udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */
822
823 /* read back settings for validation */
824
825 config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
826
827 if (mode == IFF_FIR)
828 config ^= IRENABLE_FIR_ON;
829 else if (mode == IFF_MIR)
830 config ^= (IRENABLE_MIR_ON|IRENABLE_CRC16_ON);
831 else
832 config ^= IRENABLE_SIR_ON;
833
834 if (config != (IRENABLE_PHYANDCLOCK|IRENABLE_ENRXST)) {
835 IRDA_WARNING("%s: failed to set %s mode!\n", __FUNCTION__,
836 (mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
837 ret = -1;
838 }
839 else {
840 if (inw(iobase+VLSI_PIO_PHYCTL) != nphyctl) {
841 IRDA_WARNING("%s: failed to apply baudrate %d\n",
842 __FUNCTION__, baudrate);
843 ret = -1;
844 }
845 else {
846 idev->mode = mode;
847 idev->baud = baudrate;
848 idev->new_baud = 0;
849 ret = 0;
850 }
851 }
852
853 if (ret)
854 vlsi_reg_debug(iobase,__FUNCTION__);
855
856 return ret;
857}
858
859static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
860{
861 vlsi_irda_dev_t *idev = ndev->priv;
862 struct vlsi_ring *r = idev->tx_ring;
863 struct ring_descr *rd;
864 unsigned long flags;
865 unsigned iobase = ndev->base_addr;
866 u8 status;
867 u16 config;
868 int mtt;
869 int len, speed;
870 struct timeval now, ready;
871 char *msg = NULL;
872
873 speed = irda_get_next_speed(skb);
874 spin_lock_irqsave(&idev->lock, flags);
875 if (speed != -1 && speed != idev->baud) {
876 netif_stop_queue(ndev);
877 idev->new_baud = speed;
878 status = RD_TX_CLRENTX; /* stop tx-ring after this frame */
879 }
880 else
881 status = 0;
882
883 if (skb->len == 0) {
884 /* handle zero packets - should be speed change */
885 if (status == 0) {
886 msg = "bogus zero-length packet";
887 goto drop_unlock;
888 }
889
 890		/* due to the completely asynchronous tx operation we might have
 891		 * IrLAP racing with the hardware here, e.g. if the controller
892 * is just sending the last packet with current speed while
893 * the LAP is already switching the speed using synchronous
894 * len=0 packet. Immediate execution would lead to hw lockup
895 * requiring a powercycle to reset. Good candidate to trigger
896 * this is the final UA:RSP packet after receiving a DISC:CMD
897 * when getting the LAP down.
898 * Note that we are not protected by the queue_stop approach
899 * because the final UA:RSP arrives _without_ request to apply
900 * new-speed-after-this-packet - hence the driver doesn't know
 901		 * this was the last packet and doesn't stop the queue. So the
 902		 * forced switch to default speed from the LAP can get through as
 903		 * little as some 10 usec later, while the UA:RSP is still being
 904		 * processed by the hardware - and we would get screwed.
905 */
906
907 if (ring_first(idev->tx_ring) == NULL) {
908 /* no race - tx-ring already empty */
909 vlsi_set_baud(idev, iobase);
910 netif_wake_queue(ndev);
911 }
912 else
913 ;
914 /* keep the speed change pending like it would
915 * for any len>0 packet. tx completion interrupt
916 * will apply it when the tx ring becomes empty.
917 */
918 spin_unlock_irqrestore(&idev->lock, flags);
919 dev_kfree_skb_any(skb);
920 return 0;
921 }
922
923 /* sanity checks - simply drop the packet */
924
925 rd = ring_last(r);
926 if (!rd) {
927 msg = "ring full, but queue wasn't stopped";
928 goto drop_unlock;
929 }
930
931 if (rd_is_active(rd)) {
932 msg = "entry still owned by hw";
933 goto drop_unlock;
934 }
935
936 if (!rd->buf) {
937 msg = "tx ring entry without pci buffer";
938 goto drop_unlock;
939 }
940
941 if (rd->skb) {
942 msg = "ring entry with old skb still attached";
943 goto drop_unlock;
944 }
945
946 /* no need for serialization or interrupt disable during mtt */
947 spin_unlock_irqrestore(&idev->lock, flags);
948
949 if ((mtt = irda_get_mtt(skb)) > 0) {
950
951 ready.tv_usec = idev->last_rx.tv_usec + mtt;
952 ready.tv_sec = idev->last_rx.tv_sec;
953 if (ready.tv_usec >= 1000000) {
954 ready.tv_usec -= 1000000;
955 ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
956 }
957 for(;;) {
958 do_gettimeofday(&now);
959 if (now.tv_sec > ready.tv_sec
960 || (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
961 break;
962 udelay(100);
963 /* must not sleep here - we are called under xmit_lock! */
964 }
965 }
966
 967	/* tx buffer is already owned by the CPU due to the
 968	 * pci_dma_sync_single_for_cpu() done at the preceding tx-completion
969 */
970
971 if (idev->mode == IFF_SIR) {
972 status |= RD_TX_DISCRC; /* no hw-crc creation */
973 len = async_wrap_skb(skb, rd->buf, r->len);
974
 975		 * Some rare worst-case situations in SIR mode might lead to a
 976		 * potential buffer overflow. The wrapper detects this and returns
 977		 * a shortened frame (without FCS/EOF), but doesn't provide
 978		 * any error indication about the invalid packet which we are
 979		 * going to transmit.
 980		 * Therefore we log if the buffer got filled to the point where the
 981		 * wrapper would abort, i.e. when there are less than 5 bytes left to
 982		 * allow appending the FCS/EOF.
983 */
984
985 if (len >= r->len-5)
986 IRDA_WARNING("%s: possible buffer overflow with SIR wrapping!\n",
987 __FUNCTION__);
988 }
989 else {
990 /* hw deals with MIR/FIR mode wrapping */
991 status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */
992 len = skb->len;
993 if (len > r->len) {
994 msg = "frame exceeds tx buffer length";
995 goto drop;
996 }
997 else
998 memcpy(rd->buf, skb->data, len);
999 }
1000
1001 rd->skb = skb; /* remember skb for tx-complete stats */
1002
1003 rd_set_count(rd, len);
1004 rd_set_status(rd, status); /* not yet active! */
1005
1006 /* give dma buffer back to busmaster-hw (flush caches to make
1007 * CPU-driven changes visible from the pci bus).
1008 */
1009
1010 pci_dma_sync_single_for_device(r->pdev, rd_get_addr(rd), r->len, r->dir);
1011
1012/* Switching to TX mode here races with the controller
1013 * which may stop TX at any time when fetching an inactive descriptor
1014 * or one with CLR_ENTX set. So we switch on TX only if TX was not running
1015 * _after_ the new descriptor was activated on the ring. This ensures
1016 * we will either find TX already stopped, or we can be sure there
1017 * will be a TX-complete interrupt even if the chip stopped doing
1018 * TX just after we found it still running. The ISR will then find
1019 * the non-empty ring and restart TX processing. The enclosing
1020 * spinlock provides the correct serialization to prevent a race with the ISR.
1021 */
1022
1023 spin_lock_irqsave(&idev->lock,flags);
1024
1025 rd_activate(rd);
1026
1027 if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
1028 int fifocnt;
1029
1030 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
1031 if (fifocnt != 0) {
1032 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
1033 }
1034
1035 config = inw(iobase+VLSI_PIO_IRCFG);
1036 mb();
1037 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
1038 wmb();
1039 outw(0, iobase+VLSI_PIO_PROMPT);
1040 }
1041 ndev->trans_start = jiffies;
1042
1043 if (ring_put(r) == NULL) {
1044 netif_stop_queue(ndev);
1045 IRDA_DEBUG(3, "%s: tx ring full - queue stopped\n", __FUNCTION__);
1046 }
1047 spin_unlock_irqrestore(&idev->lock, flags);
1048
1049 return 0;
1050
1051drop_unlock:
1052 spin_unlock_irqrestore(&idev->lock, flags);
1053drop:
1054 IRDA_WARNING("%s: dropping packet - %s\n", __FUNCTION__, msg);
1055 dev_kfree_skb_any(skb);
1056 idev->stats.tx_errors++;
1057 idev->stats.tx_dropped++;
1058 /* Don't even think about returning NET_XMIT_DROP (=1) here!
1059 * In fact any retval!=0 causes the packet scheduler to requeue the
1060 * packet for later retry of transmission - which isn't exactly
1061 * what we want after we've just called dev_kfree_skb_any ;-)
1062 */
1063 return 0;
1064}
1065
1066static void vlsi_tx_interrupt(struct net_device *ndev)
1067{
1068 vlsi_irda_dev_t *idev = ndev->priv;
1069 struct vlsi_ring *r = idev->tx_ring;
1070 struct ring_descr *rd;
1071 unsigned iobase;
1072 int ret;
1073 u16 config;
1074
1075 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
1076
1077 if (rd_is_active(rd))
1078 break;
1079
1080 ret = vlsi_process_tx(r, rd);
1081
1082 if (ret < 0) {
1083 ret = -ret;
1084 idev->stats.tx_errors++;
1085 if (ret & VLSI_TX_DROP)
1086 idev->stats.tx_dropped++;
1087 if (ret & VLSI_TX_FIFO)
1088 idev->stats.tx_fifo_errors++;
1089 }
1090 else if (ret > 0){
1091 idev->stats.tx_packets++;
1092 idev->stats.tx_bytes += ret;
1093 }
1094 }
1095
1096 iobase = ndev->base_addr;
1097
1098 if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */
1099 vlsi_set_baud(idev, iobase);
1100
1101 config = inw(iobase+VLSI_PIO_IRCFG);
1102 if (rd == NULL) /* tx ring empty: re-enable rx */
1103 outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
1104
1105 else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
1106 int fifocnt;
1107
1108 fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
1109 if (fifocnt != 0) {
1110 IRDA_DEBUG(0, "%s: rx fifo not empty(%d)\n",
1111 __FUNCTION__, fifocnt);
1112 }
1113 outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
1114 }
1115
1116 outw(0, iobase+VLSI_PIO_PROMPT);
1117
1118 if (netif_queue_stopped(ndev) && !idev->new_baud) {
1119 netif_wake_queue(ndev);
1120 IRDA_DEBUG(3, "%s: queue awoken\n", __FUNCTION__);
1121 }
1122}
1123
1124/* caller must have stopped the controller from busmastering */
1125
1126static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
1127{
1128 struct vlsi_ring *r = idev->tx_ring;
1129 struct ring_descr *rd;
1130 int ret;
1131
1132 for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
1133
1134 ret = 0;
1135 if (rd_is_active(rd)) {
1136 rd_set_status(rd, 0);
1137 rd_set_count(rd, 0);
1138 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
1139 if (rd->skb) {
1140 dev_kfree_skb_any(rd->skb);
1141 rd->skb = NULL;
1142 }
1143 IRDA_DEBUG(0, "%s - dropping tx packet\n", __FUNCTION__);
1144 ret = -VLSI_TX_DROP;
1145 }
1146 else
1147 ret = vlsi_process_tx(r, rd);
1148
1149 if (ret < 0) {
1150 ret = -ret;
1151 idev->stats.tx_errors++;
1152 if (ret & VLSI_TX_DROP)
1153 idev->stats.tx_dropped++;
1154 if (ret & VLSI_TX_FIFO)
1155 idev->stats.tx_fifo_errors++;
1156 }
1157 else if (ret > 0){
1158 idev->stats.tx_packets++;
1159 idev->stats.tx_bytes += ret;
1160 }
1161 }
1162
1163}
1164
1165/********************************************************/
1166
1167static int vlsi_start_clock(struct pci_dev *pdev)
1168{
1169 u8 clkctl, lock;
1170 int i, count;
1171
1172 if (clksrc < 2) { /* auto or PLL: try PLL */
1173 clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
1174 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
1175
1176 /* procedure to detect PLL lock synchronisation:
1177 * after 0.5 msec initial delay we expect to find 3 PLL lock
1178 * indications within 10 msec for successful PLL detection.
1179 */
1180 udelay(500);
1181 count = 0;
1182 for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
1183 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
1184 if (lock&CLKCTL_LOCK) {
1185 if (++count >= 3)
1186 break;
1187 }
1188 udelay(50);
1189 }
1190 if (count < 3) {
1191 if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
1192 IRDA_ERROR("%s: no PLL or failed to lock!\n",
1193 __FUNCTION__);
1194 clkctl = CLKCTL_CLKSTP;
1195 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
1196 return -1;
1197 }
1198 else /* was: clksrc=0(auto) */
1199 clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
1200
1201 IRDA_DEBUG(0, "%s: PLL not locked, fallback to clksrc=%d\n",
1202 __FUNCTION__, clksrc);
1203 }
1204 else
1205 clksrc = 1; /* got successful PLL lock */
1206 }
1207
1208 if (clksrc != 1) {
1209		/* we get here if either no PLL was detected in auto-mode or
1210		   an external clock source was explicitly specified */
1211
1212 clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
1213 if (clksrc == 3)
1214 clkctl |= CLKCTL_XCKSEL;
1215 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
1216
1217 /* no way to test for working XCLK */
1218 }
1219 else
1220 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
1221
1222 /* ok, now going to connect the chip with the clock source */
1223
1224 clkctl &= ~CLKCTL_CLKSTP;
1225 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
1226
1227 return 0;
1228}
1229
1230static void vlsi_stop_clock(struct pci_dev *pdev)
1231{
1232 u8 clkctl;
1233
1234 /* disconnect chip from clock source */
1235 pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
1236 clkctl |= CLKCTL_CLKSTP;
1237 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
1238
1239 /* disable all clock sources */
1240 clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
1241 pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
1242}
1243
1244/********************************************************/
1245
1246/* writing all-zero to the VLSI PCI IO register area seems to prevent
1247 * some occasional situations where the hardware fails (the symptoms
1248 * are stalled tx/rx state machines, i.e. everything looks ok for
1249 * receive or transmit but the hw makes no progress or is unable to
1250 * access the bus memory locations).
1251 * Best place to call this is immediately after/before the internal clock
1252 * gets started/stopped.
1253 */
1254
1255static inline void vlsi_clear_regs(unsigned iobase)
1256{
1257 unsigned i;
1258 const unsigned chip_io_extent = 32;
1259
1260 for (i = 0; i < chip_io_extent; i += sizeof(u16))
1261 outw(0, iobase + i);
1262}
1263
1264static int vlsi_init_chip(struct pci_dev *pdev)
1265{
1266 struct net_device *ndev = pci_get_drvdata(pdev);
1267 vlsi_irda_dev_t *idev = ndev->priv;
1268 unsigned iobase;
1269 u16 ptr;
1270
1271 /* start the clock and clean the registers */
1272
1273 if (vlsi_start_clock(pdev)) {
1274 IRDA_ERROR("%s: no valid clock source\n", __FUNCTION__);
1275 return -1;
1276 }
1277 iobase = ndev->base_addr;
1278 vlsi_clear_regs(iobase);
1279
1280 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
1281
1282 outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
1283
1284 /* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */
1285
1286 outw(0, iobase+VLSI_PIO_IRCFG);
1287 wmb();
1288
1289 outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
1290
1291 outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
1292
1293 outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
1294 iobase+VLSI_PIO_RINGSIZE);
1295
1296 ptr = inw(iobase+VLSI_PIO_RINGPTR);
1297 atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
1298 atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
1299 atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
1300 atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
1301
1302 vlsi_set_baud(idev, iobase); /* idev->new_baud used as provided by caller */
1303
1304 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
1305 wmb();
1306
1307 /* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
1308 * basically every received pulse fires an ACTIVITY-INT
1309	 * leading to >>1000 INTs per second instead of a few tens
1310 */
1311
1312 outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
1313
1314 return 0;
1315}
1316
1317static int vlsi_start_hw(vlsi_irda_dev_t *idev)
1318{
1319 struct pci_dev *pdev = idev->pdev;
1320 struct net_device *ndev = pci_get_drvdata(pdev);
1321 unsigned iobase = ndev->base_addr;
1322 u8 byte;
1323
1324 /* we don't use the legacy UART, disable its address decoding */
1325
1326 pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
1327 byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
1328 pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
1329
1330 /* enable PCI busmaster access to our 16MB page */
1331
1332 pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
1333 pci_set_master(pdev);
1334
1335 if (vlsi_init_chip(pdev) < 0) {
1336 pci_disable_device(pdev);
1337 return -1;
1338 }
1339
1340 vlsi_fill_rx(idev->rx_ring);
1341
1342 do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
1343
1344 outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
1345
1346 return 0;
1347}
1348
1349static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
1350{
1351 struct pci_dev *pdev = idev->pdev;
1352 struct net_device *ndev = pci_get_drvdata(pdev);
1353 unsigned iobase = ndev->base_addr;
1354 unsigned long flags;
1355
1356 spin_lock_irqsave(&idev->lock,flags);
1357 outw(0, iobase+VLSI_PIO_IRENABLE);
1358 outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
1359
1360 /* disable and w/c irqs */
1361 outb(0, iobase+VLSI_PIO_IRINTR);
1362 wmb();
1363 outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR);
1364 spin_unlock_irqrestore(&idev->lock,flags);
1365
1366 vlsi_unarm_tx(idev);
1367 vlsi_unarm_rx(idev);
1368
1369 vlsi_clear_regs(iobase);
1370 vlsi_stop_clock(pdev);
1371
1372 pci_disable_device(pdev);
1373
1374 return 0;
1375}
1376
1377/**************************************************************/
1378
1379static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
1380{
1381 vlsi_irda_dev_t *idev = ndev->priv;
1382
1383 return &idev->stats;
1384}
1385
1386static void vlsi_tx_timeout(struct net_device *ndev)
1387{
1388 vlsi_irda_dev_t *idev = ndev->priv;
1389
1390
1391 vlsi_reg_debug(ndev->base_addr, __FUNCTION__);
1392 vlsi_ring_debug(idev->tx_ring);
1393
1394 if (netif_running(ndev))
1395 netif_stop_queue(ndev);
1396
1397 vlsi_stop_hw(idev);
1398
1399 /* now simply restart the whole thing */
1400
1401 if (!idev->new_baud)
1402 idev->new_baud = idev->baud; /* keep current baudrate */
1403
1404 if (vlsi_start_hw(idev))
1405 IRDA_ERROR("%s: failed to restart hw - %s(%s) unusable!\n",
1406 __FUNCTION__, PCIDEV_NAME(idev->pdev), ndev->name);
1407 else
1408 netif_start_queue(ndev);
1409}
1410
1411static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1412{
1413 vlsi_irda_dev_t *idev = ndev->priv;
1414 struct if_irda_req *irq = (struct if_irda_req *) rq;
1415 unsigned long flags;
1416 u16 fifocnt;
1417 int ret = 0;
1418
1419 switch (cmd) {
1420 case SIOCSBANDWIDTH:
1421 if (!capable(CAP_NET_ADMIN)) {
1422 ret = -EPERM;
1423 break;
1424 }
1425 spin_lock_irqsave(&idev->lock, flags);
1426 idev->new_baud = irq->ifr_baudrate;
1427 /* when called from userland there might be a minor race window here
1428 * if the stack tries to change speed concurrently - which would be
1429 * pretty strange anyway with the userland having full control...
1430 */
1431 vlsi_set_baud(idev, ndev->base_addr);
1432 spin_unlock_irqrestore(&idev->lock, flags);
1433 break;
1434 case SIOCSMEDIABUSY:
1435 if (!capable(CAP_NET_ADMIN)) {
1436 ret = -EPERM;
1437 break;
1438 }
1439 irda_device_set_media_busy(ndev, TRUE);
1440 break;
1441 case SIOCGRECEIVING:
1442 /* the best we can do: check whether there are any bytes in rx fifo.
1443 * The trustable window (in case some data arrives just afterwards)
1444 * may be as short as 1usec or so at 4Mbps.
1445 */
1446 fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
1447 irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
1448 break;
1449 default:
1450 IRDA_WARNING("%s: notsupp - cmd=%04x\n",
1451 __FUNCTION__, cmd);
1452 ret = -EOPNOTSUPP;
1453 }
1454
1455 return ret;
1456}
1457
1458/********************************************************/
1459
1460static irqreturn_t vlsi_interrupt(int irq, void *dev_instance,
1461 struct pt_regs *regs)
1462{
1463 struct net_device *ndev = dev_instance;
1464 vlsi_irda_dev_t *idev = ndev->priv;
1465 unsigned iobase;
1466 u8 irintr;
1467 int boguscount = 5;
1468 unsigned long flags;
1469 int handled = 0;
1470
1471 iobase = ndev->base_addr;
1472 spin_lock_irqsave(&idev->lock,flags);
1473 do {
1474 irintr = inb(iobase+VLSI_PIO_IRINTR);
1475 mb();
1476 outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
1477
1478 if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
1479 break;
1480
1481 handled = 1;
1482
1483 if (unlikely(!(irintr & ~IRINTR_ACTIVITY)))
1484			break;	/* nothing to do if only activity */
1485
1486 if (irintr&IRINTR_RPKTINT)
1487 vlsi_rx_interrupt(ndev);
1488
1489 if (irintr&IRINTR_TPKTINT)
1490 vlsi_tx_interrupt(ndev);
1491
1492 } while (--boguscount > 0);
1493 spin_unlock_irqrestore(&idev->lock,flags);
1494
1495 if (boguscount <= 0)
1496 IRDA_MESSAGE("%s: too much work in interrupt!\n",
1497 __FUNCTION__);
1498 return IRQ_RETVAL(handled);
1499}
1500
1501/********************************************************/
1502
1503static int vlsi_open(struct net_device *ndev)
1504{
1505 vlsi_irda_dev_t *idev = ndev->priv;
1506 int err = -EAGAIN;
1507 char hwname[32];
1508
1509 if (pci_request_regions(idev->pdev, drivername)) {
1510 IRDA_WARNING("%s: io resource busy\n", __FUNCTION__);
1511 goto errout;
1512 }
1513 ndev->base_addr = pci_resource_start(idev->pdev,0);
1514 ndev->irq = idev->pdev->irq;
1515
1516	/* on some rare occasions the chip apparently comes up with
1517	 * IRQs pending. We'd better w/c pending IRQs and disable them all
1518 */
1519
1520 outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
1521
1522 if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
1523 drivername, ndev)) {
1524 IRDA_WARNING("%s: couldn't get IRQ: %d\n",
1525 __FUNCTION__, ndev->irq);
1526 goto errout_io;
1527 }
1528
1529 if ((err = vlsi_create_hwif(idev)) != 0)
1530 goto errout_irq;
1531
1532 sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
1533 idev->irlap = irlap_open(ndev,&idev->qos,hwname);
1534 if (!idev->irlap)
1535 goto errout_free_ring;
1536
1537 do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
1538
1539 idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
1540
1541 if ((err = vlsi_start_hw(idev)) != 0)
1542 goto errout_close_irlap;
1543
1544 netif_start_queue(ndev);
1545
1546 IRDA_MESSAGE("%s: device %s operational\n", __FUNCTION__, ndev->name);
1547
1548 return 0;
1549
1550errout_close_irlap:
1551 irlap_close(idev->irlap);
1552errout_free_ring:
1553 vlsi_destroy_hwif(idev);
1554errout_irq:
1555 free_irq(ndev->irq,ndev);
1556errout_io:
1557 pci_release_regions(idev->pdev);
1558errout:
1559 return err;
1560}
1561
1562static int vlsi_close(struct net_device *ndev)
1563{
1564 vlsi_irda_dev_t *idev = ndev->priv;
1565
1566 netif_stop_queue(ndev);
1567
1568 if (idev->irlap)
1569 irlap_close(idev->irlap);
1570 idev->irlap = NULL;
1571
1572 vlsi_stop_hw(idev);
1573
1574 vlsi_destroy_hwif(idev);
1575
1576 free_irq(ndev->irq,ndev);
1577
1578 pci_release_regions(idev->pdev);
1579
1580 IRDA_MESSAGE("%s: device %s stopped\n", __FUNCTION__, ndev->name);
1581
1582 return 0;
1583}
1584
1585static int vlsi_irda_init(struct net_device *ndev)
1586{
1587 vlsi_irda_dev_t *idev = ndev->priv;
1588 struct pci_dev *pdev = idev->pdev;
1589
1590 SET_MODULE_OWNER(ndev);
1591
1592 ndev->irq = pdev->irq;
1593 ndev->base_addr = pci_resource_start(pdev,0);
1594
1595 /* PCI busmastering
1596	 * see the include file for details on why we need these 2 masks, in this order!
1597 */
1598
1599 if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
1600 || pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
1601 IRDA_ERROR("%s: aborting due to PCI BM-DMA address limitations\n", __FUNCTION__);
1602 return -1;
1603 }
1604
1605 irda_init_max_qos_capabilies(&idev->qos);
1606
1607 /* the VLSI82C147 does not support 576000! */
1608
1609 idev->qos.baud_rate.bits = IR_2400 | IR_9600
1610 | IR_19200 | IR_38400 | IR_57600 | IR_115200
1611 | IR_1152000 | (IR_4000000 << 8);
1612
1613 idev->qos.min_turn_time.bits = qos_mtt_bits;
1614
1615 irda_qos_bits_to_value(&idev->qos);
1616
1617 /* currently no public media definitions for IrDA */
1618
1619 ndev->flags |= IFF_PORTSEL | IFF_AUTOMEDIA;
1620 ndev->if_port = IF_PORT_UNKNOWN;
1621
1622 ndev->open = vlsi_open;
1623 ndev->stop = vlsi_close;
1624 ndev->get_stats = vlsi_get_stats;
1625 ndev->hard_start_xmit = vlsi_hard_start_xmit;
1626 ndev->do_ioctl = vlsi_ioctl;
1627 ndev->tx_timeout = vlsi_tx_timeout;
1628 ndev->watchdog_timeo = 500*HZ/1000; /* max. allowed turn time for IrLAP */
1629
1630 SET_NETDEV_DEV(ndev, &pdev->dev);
1631
1632 return 0;
1633}
1634
1635/**************************************************************/
1636
1637static int __devinit
1638vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1639{
1640 struct net_device *ndev;
1641 vlsi_irda_dev_t *idev;
1642
1643 if (pci_enable_device(pdev))
1644 goto out;
1645 else
1646 pdev->current_state = 0; /* hw must be running now */
1647
1648 IRDA_MESSAGE("%s: IrDA PCI controller %s detected\n",
1649 drivername, PCIDEV_NAME(pdev));
1650
1651 if ( !pci_resource_start(pdev,0)
1652 || !(pci_resource_flags(pdev,0) & IORESOURCE_IO) ) {
1653		IRDA_ERROR("%s: bar 0 invalid\n", __FUNCTION__);
1654 goto out_disable;
1655 }
1656
1657 ndev = alloc_irdadev(sizeof(*idev));
1658 if (ndev==NULL) {
1659 IRDA_ERROR("%s: Unable to allocate device memory.\n",
1660 __FUNCTION__);
1661 goto out_disable;
1662 }
1663
1664 idev = ndev->priv;
1665
1666 spin_lock_init(&idev->lock);
1667 init_MUTEX(&idev->sem);
1668 down(&idev->sem);
1669 idev->pdev = pdev;
1670
1671 if (vlsi_irda_init(ndev) < 0)
1672 goto out_freedev;
1673
1674 if (register_netdev(ndev) < 0) {
1675 IRDA_ERROR("%s: register_netdev failed\n", __FUNCTION__);
1676 goto out_freedev;
1677 }
1678
1679 if (vlsi_proc_root != NULL) {
1680 struct proc_dir_entry *ent;
1681
1682 ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root);
1683 if (!ent) {
1684 IRDA_WARNING("%s: failed to create proc entry\n",
1685 __FUNCTION__);
1686 } else {
1687 ent->data = ndev;
1688 ent->proc_fops = VLSI_PROC_FOPS;
1689 ent->size = 0;
1690 }
1691 idev->proc_entry = ent;
1692 }
1693 IRDA_MESSAGE("%s: registered device %s\n", drivername, ndev->name);
1694
1695 pci_set_drvdata(pdev, ndev);
1696 up(&idev->sem);
1697
1698 return 0;
1699
1700out_freedev:
1701 up(&idev->sem);
1702 free_netdev(ndev);
1703out_disable:
1704 pci_disable_device(pdev);
1705out:
1706 pci_set_drvdata(pdev, NULL);
1707 return -ENODEV;
1708}
1709
1710static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
1711{
1712 struct net_device *ndev = pci_get_drvdata(pdev);
1713 vlsi_irda_dev_t *idev;
1714
1715 if (!ndev) {
1716 IRDA_ERROR("%s: lost netdevice?\n", drivername);
1717 return;
1718 }
1719
1720 unregister_netdev(ndev);
1721
1722 idev = ndev->priv;
1723 down(&idev->sem);
1724 if (idev->proc_entry) {
1725 remove_proc_entry(ndev->name, vlsi_proc_root);
1726 idev->proc_entry = NULL;
1727 }
1728 up(&idev->sem);
1729
1730 free_netdev(ndev);
1731
1732 pci_set_drvdata(pdev, NULL);
1733
1734 IRDA_MESSAGE("%s: %s removed\n", drivername, PCIDEV_NAME(pdev));
1735}
1736
1737#ifdef CONFIG_PM
1738
1739/* The controller doesn't provide PCI PM capabilities as defined by the PCI specs.
1740 * Some of the Linux PCI-PM code, however, depends on these capabilities, for example in
1741 * pci_set_power_state(). So we have to take care to perform the required
1742 * operations on our own (particularly reflecting pdev->current_state),
1743 * otherwise we might get cheated by pci-pm.
1744 */
1745
1746
1747static int vlsi_irda_suspend(struct pci_dev *pdev, u32 state)
1748{
1749 struct net_device *ndev = pci_get_drvdata(pdev);
1750 vlsi_irda_dev_t *idev;
1751
1752 if (state < 1 || state > 3 ) {
1753 IRDA_ERROR("%s - %s: invalid pm state request: %u\n",
1754 __FUNCTION__, PCIDEV_NAME(pdev), state);
1755 return 0;
1756 }
1757 if (!ndev) {
1758 IRDA_ERROR("%s - %s: no netdevice \n",
1759 __FUNCTION__, PCIDEV_NAME(pdev));
1760 return 0;
1761 }
1762 idev = ndev->priv;
1763 down(&idev->sem);
1764 if (pdev->current_state != 0) { /* already suspended */
1765 if (state > pdev->current_state) { /* simply go deeper */
1766 pci_set_power_state(pdev,state);
1767 pdev->current_state = state;
1768 }
1769 else
1770 IRDA_ERROR("%s - %s: invalid suspend request %u -> %u\n", __FUNCTION__, PCIDEV_NAME(pdev), pdev->current_state, state);
1771 up(&idev->sem);
1772 return 0;
1773 }
1774
1775 if (netif_running(ndev)) {
1776 netif_device_detach(ndev);
1777 vlsi_stop_hw(idev);
1778 pci_save_state(pdev);
1779 if (!idev->new_baud)
1780 /* remember speed settings to restore on resume */
1781 idev->new_baud = idev->baud;
1782 }
1783
1784 pci_set_power_state(pdev,state);
1785 pdev->current_state = state;
1786 idev->resume_ok = 1;
1787 up(&idev->sem);
1788 return 0;
1789}
1790
1791static int vlsi_irda_resume(struct pci_dev *pdev)
1792{
1793 struct net_device *ndev = pci_get_drvdata(pdev);
1794 vlsi_irda_dev_t *idev;
1795
1796 if (!ndev) {
1797 IRDA_ERROR("%s - %s: no netdevice \n",
1798 __FUNCTION__, PCIDEV_NAME(pdev));
1799 return 0;
1800 }
1801 idev = ndev->priv;
1802 down(&idev->sem);
1803 if (pdev->current_state == 0) {
1804 up(&idev->sem);
1805 IRDA_WARNING("%s - %s: already resumed\n",
1806 __FUNCTION__, PCIDEV_NAME(pdev));
1807 return 0;
1808 }
1809
1810 pci_set_power_state(pdev, 0);
1811 pdev->current_state = 0;
1812
1813 if (!idev->resume_ok) {
1814 /* should be obsolete now - but used to happen due to:
1815 * - pci layer initially setting pdev->current_state = 4 (unknown)
1816 * - pci layer did not walk the save_state-tree (might be APM problem)
1817 * so we could not refuse to suspend from undefined state
1818 * - vlsi_irda_suspend detected invalid state and refused to save
1819 * configuration for resume - but was too late to stop suspending
1820 * - vlsi_irda_resume got screwed when trying to resume from garbage
1821 *
1822 * now we explicitly set pdev->current_state = 0 after enabling the
1823 * device and independently resume_ok should catch any garbage config.
1824 */
1825 IRDA_WARNING("%s - hm, nothing to resume?\n", __FUNCTION__);
1826 up(&idev->sem);
1827 return 0;
1828 }
1829
1830 if (netif_running(ndev)) {
1831 pci_restore_state(pdev);
1832 vlsi_start_hw(idev);
1833 netif_device_attach(ndev);
1834 }
1835 idev->resume_ok = 0;
1836 up(&idev->sem);
1837 return 0;
1838}
1839
1840#endif /* CONFIG_PM */
1841
1842/*********************************************************/
1843
1844static struct pci_driver vlsi_irda_driver = {
1845 .name = drivername,
1846 .id_table = vlsi_irda_table,
1847 .probe = vlsi_irda_probe,
1848 .remove = __devexit_p(vlsi_irda_remove),
1849#ifdef CONFIG_PM
1850 .suspend = vlsi_irda_suspend,
1851 .resume = vlsi_irda_resume,
1852#endif
1853};
1854
1855#define PROC_DIR ("driver/" DRIVER_NAME)
1856
1857static int __init vlsi_mod_init(void)
1858{
1859 int i, ret;
1860
1861 if (clksrc < 0 || clksrc > 3) {
1862 IRDA_ERROR("%s: invalid clksrc=%d\n", drivername, clksrc);
1863 return -1;
1864 }
1865
1866 for (i = 0; i < 2; i++) {
1867 switch(ringsize[i]) {
1868 case 4:
1869 case 8:
1870 case 16:
1871 case 32:
1872 case 64:
1873 break;
1874 default:
1875			IRDA_WARNING("%s: invalid %s ringsize %d, using default=8\n", drivername, (i)?"rx":"tx", ringsize[i]);
1876 ringsize[i] = 8;
1877 break;
1878 }
1879 }
1880
1881 sirpulse = !!sirpulse;
1882
1883 /* create_proc_entry returns NULL if !CONFIG_PROC_FS.
1884 * Failure to create the procfs entry is handled like running
1885 * without procfs - it's not required for the driver to work.
1886 */
1887 vlsi_proc_root = create_proc_entry(PROC_DIR, S_IFDIR, NULL);
1888 if (vlsi_proc_root) {
1889 /* protect registered procdir against module removal.
1890 * Because we are in the module init path there's no race
1891 * window after create_proc_entry (and no barrier needed).
1892 */
1893 vlsi_proc_root->owner = THIS_MODULE;
1894 }
1895
1896 ret = pci_module_init(&vlsi_irda_driver);
1897
1898 if (ret && vlsi_proc_root)
1899 remove_proc_entry(PROC_DIR, NULL);
1900 return ret;
1901
1902}
1903
1904static void __exit vlsi_mod_exit(void)
1905{
1906 pci_unregister_driver(&vlsi_irda_driver);
1907 if (vlsi_proc_root)
1908 remove_proc_entry(PROC_DIR, NULL);
1909}
1910
1911module_init(vlsi_mod_init);
1912module_exit(vlsi_mod_exit);
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
new file mode 100644
index 000000000000..414694abf588
--- /dev/null
+++ b/drivers/net/irda/vlsi_ir.h
@@ -0,0 +1,798 @@
1
2/*********************************************************************
3 *
4 * vlsi_ir.h: VLSI82C147 PCI IrDA controller driver for Linux
5 *
6 * Version: 0.5
7 *
8 * Copyright (c) 2001-2003 Martin Diehl
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of
13 * the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
23 * MA 02111-1307 USA
24 *
25 ********************************************************************/
26
27#ifndef IRDA_VLSI_FIR_H
28#define IRDA_VLSI_FIR_H
29
30/* ================================================================
31 * compatibility stuff
32 */
33
34/* definitions not present in pci_ids.h */
35
36#ifndef PCI_CLASS_WIRELESS_IRDA
37#define PCI_CLASS_WIRELESS_IRDA 0x0d00
38#endif
39
40#ifndef PCI_CLASS_SUBCLASS_MASK
41#define PCI_CLASS_SUBCLASS_MASK 0xffff
42#endif
43
44/* in recent 2.5 interrupt handlers have non-void return value */
45#ifndef IRQ_RETVAL
46typedef void irqreturn_t;
47#define IRQ_NONE
48#define IRQ_HANDLED
49#define IRQ_RETVAL(x)
50#endif
51
52/* some stuff needs to check the kernel version. Not all 2.5 stuff was present
53 * in early 2.5.x - the test is merely to separate 2.4 from 2.5
54 */
55#include <linux/version.h>
56
57#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
58
59/* PDE() introduced in 2.5.4 */
60#ifdef CONFIG_PROC_FS
61#define PDE(inode) ((inode)->u.generic_ip)
62#endif
63
64/* irda crc16 calculation exported in 2.5.42 */
65#define irda_calc_crc16(fcs,buf,len) (GOOD_FCS)
66
67/* we use this for unified pci device name access */
68#define PCIDEV_NAME(pdev) ((pdev)->name)
69
70#else /* 2.5 or later */
71
72/* recent 2.5/2.6 kernels store pci device names at varying places ;-) */
73#ifdef CONFIG_PCI_NAMES
74/* human readable name */
75#define PCIDEV_NAME(pdev) ((pdev)->pretty_name)
76#else
77/* whatever we get from the associated struct device - bus:slot:dev.fn id */
78#define PCIDEV_NAME(pdev) (pci_name(pdev))
79#endif
80
81#endif
82
83/* ================================================================ */
84
85/* non-standard PCI registers */
86
87enum vlsi_pci_regs {
88 VLSI_PCI_CLKCTL = 0x40, /* chip clock input control */
89 VLSI_PCI_MSTRPAGE = 0x41, /* addr [31:24] for all busmaster cycles */
90 VLSI_PCI_IRMISC = 0x42 /* mainly legacy UART related */
91};
92
93/* ------------------------------------------ */
94
95/* VLSI_PCI_CLKCTL: Clock Control Register (u8, rw) */
96
97/* Three possible clock sources: either on-chip 48MHz PLL or
98 * external clock applied to EXTCLK pin. External clock may
99 * be either 48MHz or 40MHz, which is indicated by XCKSEL.
100 * CLKSTP controls whether the selected clock source gets
101 * connected to the IrDA block.
102 *
103 * On my HP OB-800 the BIOS sets external 40MHz clock as source
104 * when IrDA is enabled, and I've never detected any PLL lock success.
105 * Apparently the 14.3...MHz OSC input required for the PLL to work
106 * is not connected and the 40MHz EXTCLK is provided externally.
107 * At least this is what makes the driver work for me.
108 */
109
110enum vlsi_pci_clkctl {
111
112 /* PLL control */
113
114 CLKCTL_PD_INV = 0x04, /* PD#: inverted power down signal,
115 * i.e. PLL is powered, if PD_INV set */
116 CLKCTL_LOCK = 0x40, /* (ro) set, if PLL is locked */
117
118 /* clock source selection */
119
120 CLKCTL_EXTCLK = 0x20, /* set to select external clock input, not PLL */
121 CLKCTL_XCKSEL = 0x10, /* set to indicate EXTCLK is 40MHz, not 48MHz */
122
123 /* IrDA block control */
124
125 CLKCTL_CLKSTP = 0x80, /* set to disconnect from selected clock source */
126 CLKCTL_WAKE = 0x08 /* set to enable wakeup feature: whenever IR activity
127 * is detected, PD_INV gets set(?) and CLKSTP cleared */
128};
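/*
 * Editor's note: illustrative sketch only, not part of the driver. Based on
 * the CLKCTL description above, selecting the external 40MHz clock and
 * leaving it connected to the IrDA block (CLKSTP cleared) could look roughly
 * like this; the helper name is made up, pci_write_config_byte() is the
 * standard PCI config-space accessor.
 */
static inline void vlsi_clkctl_ext40_sketch(struct pci_dev *pdev)
{
	u8 clkctl = CLKCTL_EXTCLK | CLKCTL_XCKSEL;	/* external 40MHz source */

	/* CLKSTP stays cleared so the selected clock reaches the IrDA block */
	pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
}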
129
130/* ------------------------------------------ */
131
132/* VLSI_PCI_MSTRPAGE: Master Page Register (u8, rw) and busmastering stuff */
133
134#define DMA_MASK_USED_BY_HW 0xffffffff
135#define DMA_MASK_MSTRPAGE 0x00ffffff
136#define MSTRPAGE_VALUE (DMA_MASK_MSTRPAGE >> 24)
137
138 /* PCI busmastering is somewhat special for this guy - in short:
139 *
140 * We select to operate using fixed MSTRPAGE=0, use ISA DMA
141 * address restrictions to make the PCI BM api aware of this,
142 * but ensure the hardware is dealing with real 32bit access.
143 *
144 * In detail:
145 * The chip executes normal 32bit busmaster cycles, i.e.
146 * drives all 32 address lines. These addresses however are
147 * composed of [0:23] taken from various busaddr-pointers
148 * and [24:31] taken from the MSTRPAGE register in the VLSI82C147
149 * config space. Therefore _all_ busmastering must be
150 * targeted to/from one single 16MB (busaddr-) superpage!
151 * The point is to make sure all the allocations for memory
152 * locations with busmaster access (ring descriptors, buffers)
153 * are indeed bus-mappable to the same 16MB range (for x86 this
154 * means they must reside in the same 16MB physical memory address
155	 * range). The only constraint we have which supports the "several objects
156	 * mappable to a common 16MB range" paradigm is the old ISA DMA
157	 * restriction to the first 16MB of the physical address range.
158 * Hence the approach here is to enable PCI busmaster support using
159 * the correct 32bit dma-mask used by the chip. Afterwards the device's
160 * dma-mask gets restricted to 24bit, which must be honoured somehow by
161 * all allocations for memory areas to be exposed to the chip ...
162 *
163 * Note:
164 * Don't be surprised to get "Setting latency timer..." messages every
165 * time when PCI busmastering is enabled for the chip.
166 * The chip has its PCI latency timer RO fixed at 0 - which is not a
167 * problem here, because it is never requesting _burst_ transactions.
168 */
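/*
 * Editor's note: hypothetical sketch (not the driver's actual code) of the
 * two-step dma-mask handling described above; pci_set_dma_mask() is the
 * regular PCI API and the helper name is made up.
 */
static inline int vlsi_set_dma_masks_sketch(struct pci_dev *pdev)
{
	int ret;

	/* the chip really drives all 32 address lines ... */
	ret = pci_set_dma_mask(pdev, DMA_MASK_USED_BY_HW);
	if (ret)
		return ret;
	/* ... but every buffer must fall into the single 16MB MSTRPAGE window,
	 * so restrict the mask used for allocations to 24 bit afterwards */
	return pci_set_dma_mask(pdev, DMA_MASK_MSTRPAGE);
}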
169
170/* ------------------------------------------ */
171
172/* VLSI_PCIIRMISC: IR Miscellaneous Register (u8, rw) */
173
174/* legacy UART emulation - not used by this driver - would require:
175 * (see below for some register-value definitions)
176 *
177 * - IRMISC_UARTEN must be set to enable UART address decoding
178 * - IRMISC_UARTSEL configured
179 * - IRCFG_MASTER must be cleared
180 * - IRCFG_SIR must be set
181 * - IRENABLE_PHYANDCLOCK must be asserted 0->1 (and hence IRENABLE_SIR_ON)
182 */
183
184enum vlsi_pci_irmisc {
185
186 /* IR transceiver control */
187
188 IRMISC_IRRAIL = 0x40, /* (ro?) IR rail power indication (and control?)
189 * 0=3.3V / 1=5V. Probably set during power-on?
190 * unclear - not touched by driver */
191 IRMISC_IRPD = 0x08, /* transceiver power down, if set */
192
193 /* legacy UART control */
194
195 IRMISC_UARTTST = 0x80, /* UART test mode - "always write 0" */
196 IRMISC_UARTEN = 0x04, /* enable UART address decoding */
197
198 /* bits [1:0] IRMISC_UARTSEL to select legacy UART address */
199
200 IRMISC_UARTSEL_3f8 = 0x00,
201 IRMISC_UARTSEL_2f8 = 0x01,
202 IRMISC_UARTSEL_3e8 = 0x02,
203 IRMISC_UARTSEL_2e8 = 0x03
204};
205
206/* ================================================================ */
207
208/* registers mapped to 32 byte PCI IO space */
209
210/* note: it is better to access all registers at the indicated u8/u16 size,
211 *	 although some of them contain only 1 byte of information.
212 *	 Some of them (particularly PROMPT and IRCFG) ignore
213 *	 access when using the wrong addressing mode!
214 */
215
216enum vlsi_pio_regs {
217 VLSI_PIO_IRINTR = 0x00, /* interrupt enable/request (u8, rw) */
218 VLSI_PIO_RINGPTR = 0x02, /* rx/tx ring pointer (u16, ro) */
219 VLSI_PIO_RINGBASE = 0x04, /* [23:10] of ring address (u16, rw) */
220 VLSI_PIO_RINGSIZE = 0x06, /* rx/tx ring size (u16, rw) */
221 VLSI_PIO_PROMPT = 0x08, /* triggers ring processing (u16, wo) */
222 /* 0x0a-0x0f: reserved / duplicated UART regs */
223 VLSI_PIO_IRCFG = 0x10, /* configuration select (u16, rw) */
224 VLSI_PIO_SIRFLAG = 0x12, /* BOF/EOF for filtered SIR (u16, ro) */
225 VLSI_PIO_IRENABLE = 0x14, /* enable and status register (u16, rw/ro) */
226 VLSI_PIO_PHYCTL = 0x16, /* physical layer current status (u16, ro) */
227 VLSI_PIO_NPHYCTL = 0x18, /* next physical layer select (u16, rw) */
228 VLSI_PIO_MAXPKT = 0x1a, /* [11:0] max len for packet receive (u16, rw) */
229 VLSI_PIO_RCVBCNT = 0x1c /* current receive-FIFO byte count (u16, ro) */
230 /* 0x1e-0x1f: reserved / duplicated UART regs */
231};
232
233/* ------------------------------------------ */
234
235/* VLSI_PIO_IRINTR: Interrupt Register (u8, rw) */
236
237/* enable-bits:
238 * 1 = enable / 0 = disable
239 * interrupt condition bits:
240 * set according to corresponding interrupt source
241 * (regardless of the state of the enable bits)
242 * enable bit status indicates whether interrupt gets raised
243 * write-to-clear
244 * note: RPKTINT and TPKTINT behave differently in legacy UART mode (which we don't use :-)
245 */
246
247enum vlsi_pio_irintr {
248 IRINTR_ACTEN = 0x80, /* activity interrupt enable */
249 IRINTR_ACTIVITY = 0x40, /* activity monitor (traffic detected) */
250 IRINTR_RPKTEN = 0x20, /* receive packet interrupt enable*/
251	IRINTR_RPKTINT = 0x10,	/* rx-packet transferred from fifo to memory finished */
252 IRINTR_TPKTEN = 0x08, /* transmit packet interrupt enable */
253 IRINTR_TPKTINT = 0x04, /* last bit of tx-packet+crc shifted to ir-pulser */
254 IRINTR_OE_EN = 0x02, /* UART rx fifo overrun error interrupt enable */
255 IRINTR_OE_INT = 0x01 /* UART rx fifo overrun error (read LSR to clear) */
256};
257
258/* we use this mask to check whether the (shared PCI) interrupt is ours */
259
260#define IRINTR_INT_MASK (IRINTR_ACTIVITY|IRINTR_RPKTINT|IRINTR_TPKTINT)
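/*
 * Editor's note: minimal sketch, not the driver's real interrupt handler,
 * showing how the write-to-clear semantics and IRINTR_INT_MASK above would
 * typically be used on a shared PCI interrupt line; 'iobase' is assumed to be
 * the chip's mapped PCI I/O base and the function name is made up.
 */
static inline irqreturn_t vlsi_ack_irintr_sketch(unsigned iobase)
{
	u8 irintr = inb(iobase + VLSI_PIO_IRINTR);

	if (!(irintr & IRINTR_INT_MASK))	/* none of our condition bits set */
		return IRQ_NONE;

	outb(irintr, iobase + VLSI_PIO_IRINTR);	/* write-to-clear the conditions */
	/* ... handle RPKTINT / TPKTINT / ACTIVITY here ... */
	return IRQ_HANDLED;
}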
261
262/* ------------------------------------------ */
263
264/* VLSI_PIO_RINGPTR: Ring Pointer Read-Back Register (u16, ro) */
265
266/* _both_ ring pointers are indices relative to the _entire_ rx,tx-ring!
267 * i.e. the referenced descriptor is located
268 * at RINGBASE + PTR * sizeof(descr) for rx and tx
269 * therefore, the tx-pointer has offset MAX_RING_DESCR
270 */
271
272#define MAX_RING_DESCR 64 /* tx, rx rings may contain up to 64 descr each */
273
274#define RINGPTR_RX_MASK (MAX_RING_DESCR-1)
275#define RINGPTR_TX_MASK ((MAX_RING_DESCR-1)<<8)
276
277#define RINGPTR_GET_RX(p) ((p)&RINGPTR_RX_MASK)
278#define RINGPTR_GET_TX(p) (((p)&RINGPTR_TX_MASK)>>8)
279
280/* ------------------------------------------ */
281
282/* VLSI_PIO_RINGBASE: Ring Pointer Base Address Register (u16, ro) */
283
284/* Contains [23:10] part of the ring base (bus-) address
285 * which must be 1k-aligned. [31:24] is taken from
286 * VLSI_PCI_MSTRPAGE above.
287 * The controller initiates non-burst PCI BM cycles to
288 * fetch and update the descriptors in the ring.
289 * Once fetched, the descriptor remains cached onchip
290 * until it gets closed and updated due to the ring
291 * processing state machine.
292 * The entire ring area is split in rx and tx areas with each
293 * area consisting of 64 descriptors of 8 bytes each.
294 * The rx(tx) ring is located at ringbase+0 (ringbase+64*8).
295 */
296
297#define BUS_TO_RINGBASE(p) (((p)>>10)&0x3fff)
298
299/* ------------------------------------------ */
300
301/* VLSI_PIO_RINGSIZE: Ring Size Register (u16, rw) */
302
303/* bit mask to indicate the ring size to be used for rx and tx.
304 * possible values encoded bits
305 * 4 0000
306 * 8 0001
307 * 16 0011
308 * 32 0111
309 * 64 1111
310 * located at [15:12] for tx and [11:8] for rx ([7:0] unused)
311 *
312 * note: probably a good idea to have IRCFG_MSTR cleared when writing
313 * this so the state machines are stopped and the RINGPTR is reset!
314 */
315
316#define SIZE_TO_BITS(num) ((((num)-1)>>2)&0x0f)
317#define TX_RX_TO_RINGSIZE(tx,rx) ((SIZE_TO_BITS(tx)<<12)|(SIZE_TO_BITS(rx)<<8))
318#define RINGSIZE_TO_RXSIZE(rs) ((((rs)&0x0f00)>>6)+4)
319#define RINGSIZE_TO_TXSIZE(rs) ((((rs)&0xf000)>>10)+4)
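/*
 * Editor's worked example (informational only): for a 32-entry tx ring and a
 * 64-entry rx ring, SIZE_TO_BITS(32) = 7 (0111) and SIZE_TO_BITS(64) = 15
 * (1111), so TX_RX_TO_RINGSIZE(32, 64) = (7<<12) | (15<<8) = 0x7f00.
 * Reading back: RINGSIZE_TO_TXSIZE(0x7f00) = 32 and RINGSIZE_TO_RXSIZE(0x7f00) = 64.
 */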
320
321
322/* ------------------------------------------ */
323
324/* VLSI_PIO_PROMPT: Ring Prompting Register (u16, write-to-start) */
325
326/* writing any value kicks the ring processing state machines
327 * for both tx, rx rings as follows:
328 * - active rings (currently owning an active descriptor)
329 * ignore the prompt and continue
330 * - idle rings fetch the next descr from the ring and start
331 * their processing
332 */
333
334/* ------------------------------------------ */
335
336/* VLSI_PIO_IRCFG: IR Config Register (u16, rw) */
337
338/* notes:
339 * - no more than one of the SIR/MIR/FIR bits may be set at any time
340 * - SIR, MIR, FIR and CRC16 select the configuration which will
341 * be applied on next 0->1 transition of IRENABLE_PHYANDCLOCK (see below).
342 * - besides allowing the PCI interface to execute busmaster cycles
343 * and therefore the ring SM to operate, the MSTR bit has side-effects:
344 * when MSTR is cleared, the RINGPTR's get reset and the legacy UART mode
345 * (in contrast to busmaster access mode) gets enabled.
346 * - clearing ENRX or setting ENTX while data is received may stall the
347 *	receive fifo until ENRX is re-enabled _and_ another packet arrives
348 * - SIRFILT means the chip performs the required unwrapping of hardware
349 * headers (XBOF's, BOF/EOF) and un-escaping in the _receive_ direction.
350 * Only the resulting IrLAP payload is copied to the receive buffers -
351 *	but with the 16bit FCS still included. The question remains whether it
352 * was already checked or we should do it before passing the packet to IrLAP?
353 */
354
355enum vlsi_pio_ircfg {
356 IRCFG_LOOP = 0x4000, /* enable loopback test mode */
357 IRCFG_ENTX = 0x1000, /* transmit enable */
358 IRCFG_ENRX = 0x0800, /* receive enable */
359 IRCFG_MSTR = 0x0400, /* master enable */
360 IRCFG_RXANY = 0x0200, /* receive any packet */
361 IRCFG_CRC16 = 0x0080, /* 16bit (not 32bit) CRC select for MIR/FIR */
362 IRCFG_FIR = 0x0040, /* FIR 4PPM encoding mode enable */
363 IRCFG_MIR = 0x0020, /* MIR HDLC encoding mode enable */
364 IRCFG_SIR = 0x0010, /* SIR encoding mode enable */
365 IRCFG_SIRFILT = 0x0008, /* enable SIR decode filter (receiver unwrapping) */
366 IRCFG_SIRTEST = 0x0004, /* allow SIR decode filter when not in SIR mode */
367 IRCFG_TXPOL = 0x0002, /* invert tx polarity when set */
368 IRCFG_RXPOL = 0x0001 /* invert rx polarity when set */
369};
370
371/* ------------------------------------------ */
372
373/* VLSI_PIO_SIRFLAG: SIR Flag Register (u16, ro) */
374
375/* register contains hardcoded BOF=0xc0 at [7:0] and EOF=0xc1 at [15:8]
376 * which is used for unwrapping received frames in SIR decode-filter mode
377 */
378
379/* ------------------------------------------ */
380
381/* VLSI_PIO_IRENABLE: IR Enable Register (u16, rw/ro) */
382
383/* notes:
384 * - IREN acts as gate for latching the configured IR mode information
385 * from IRCFG and IRPHYCTL when IREN=reset and applying them when
386 * IREN gets set afterwards.
387 * - ENTXST reflects IRCFG_ENTX
388 * - ENRXST = IRCFG_ENRX && (!IRCFG_ENTX || IRCFG_LOOP)
389 */
390
391enum vlsi_pio_irenable {
392 IRENABLE_PHYANDCLOCK = 0x8000, /* enable IR phy and gate the mode config (rw) */
393 IRENABLE_CFGER = 0x4000, /* mode configuration error (ro) */
394 IRENABLE_FIR_ON = 0x2000, /* FIR on status (ro) */
395 IRENABLE_MIR_ON = 0x1000, /* MIR on status (ro) */
396 IRENABLE_SIR_ON = 0x0800, /* SIR on status (ro) */
397 IRENABLE_ENTXST = 0x0400, /* transmit enable status (ro) */
398 IRENABLE_ENRXST = 0x0200, /* Receive enable status (ro) */
399 IRENABLE_CRC16_ON = 0x0100 /* 16bit (not 32bit) CRC enabled status (ro) */
400};
401
402#define IRENABLE_MASK 0xff00 /* Read mask */
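/*
 * Editor's note: hypothetical sketch (not the driver's code) of the gating
 * sequence described in the notes above: IRCFG and NPHYCTL are written while
 * IRENABLE_PHYANDCLOCK is 0 and get applied on its 0->1 transition; 'iobase'
 * is assumed to be the chip's PCI I/O base and the helper name is made up.
 */
static inline void vlsi_apply_config_sketch(unsigned iobase, u16 ircfg, u16 nphyctl)
{
	outw(0, iobase + VLSI_PIO_IRENABLE);	/* gate closed: latch new config */
	outw(ircfg, iobase + VLSI_PIO_IRCFG);
	outw(nphyctl, iobase + VLSI_PIO_NPHYCTL);
	wmb();
	outw(IRENABLE_PHYANDCLOCK, iobase + VLSI_PIO_IRENABLE);	/* 0->1 applies it */
}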
403
404/* ------------------------------------------ */
405
406/* VLSI_PIO_PHYCTL: IR Physical Layer Current Control Register (u16, ro) */
407
408/* read-back of the currently applied physical layer status.
409 * applied from VLSI_PIO_NPHYCTL at rising edge of IRENABLE_PHYANDCLOCK
410 * contents identical to VLSI_PIO_NPHYCTL (see below)
411 */
412
413/* ------------------------------------------ */
414
415/* VLSI_PIO_NPHYCTL: IR Physical Layer Next Control Register (u16, rw) */
416
417/* latched during IRENABLE_PHYANDCLOCK=0 and applied at 0-1 transition
418 *
419 * consists of BAUD[15:10], PLSWID[9:5] and PREAMB[4:0] bits defined as follows:
420 *
421 * SIR-mode: BAUD = (115.2kHz / baudrate) - 1
422 * PLSWID = (pulsetime * freq / (BAUD+1)) - 1
423 * where pulsetime is the requested IrPHY pulse width
424 * and freq is 8(16)MHz for 40(48)MHz primary input clock
425 * PREAMB: don't care for SIR
426 *
427 * The nominal SIR pulse width is 3/16 bit time so we have PLSWID=12
428 * fixed for all SIR speeds at 40MHz input clock (PLSWID=24 at 48MHz).
429 * IrPHY also allows shorter pulses down to the nominal pulse duration
430 * at 115.2kbaud (minus some tolerance) which is 1.41 usec.
431 * Using the expression PLSWID = 12/(BAUD+1)-1 (multiplied by two for 48MHz)
432 * we get the minimum acceptable PLSWID values according to the VLSI
433 * specification, which provides 1.5 usec pulse width for all speeds (except
434 * for 2.4kbaud getting 6usec). This is fine with IrPHY v1.3 specs and
435 * reduces the transceiver power which drains the battery. At 9.6kbaud for
436 * example this amounts to more than 90% battery power saving!
437 *
438 * MIR-mode: BAUD = 0
439 * PLSWID = 9(10) for 40(48) MHz input clock
440 * to get nominal MIR pulse width
441 * PREAMB = 1
442 *
443 * FIR-mode: BAUD = 0
444 * PLSWID: don't care
445 * PREAMB = 15
446 */
447
448#define PHYCTL_BAUD_SHIFT 10
449#define PHYCTL_BAUD_MASK 0xfc00
450#define PHYCTL_PLSWID_SHIFT 5
451#define PHYCTL_PLSWID_MASK 0x03e0
452#define PHYCTL_PREAMB_SHIFT 0
453#define PHYCTL_PREAMB_MASK 0x001f
454
455#define PHYCTL_TO_BAUD(bwp) (((bwp)&PHYCTL_BAUD_MASK)>>PHYCTL_BAUD_SHIFT)
456#define PHYCTL_TO_PLSWID(bwp) (((bwp)&PHYCTL_PLSWID_MASK)>>PHYCTL_PLSWID_SHIFT)
457#define PHYCTL_TO_PREAMB(bwp) (((bwp)&PHYCTL_PREAMB_MASK)>>PHYCTL_PREAMB_SHIFT)
458
459#define BWP_TO_PHYCTL(b,w,p) ((((b)<<PHYCTL_BAUD_SHIFT)&PHYCTL_BAUD_MASK) \
460 | (((w)<<PHYCTL_PLSWID_SHIFT)&PHYCTL_PLSWID_MASK) \
461 | (((p)<<PHYCTL_PREAMB_SHIFT)&PHYCTL_PREAMB_MASK))
462
463#define BAUD_BITS(br) ((115200/(br))-1)
464
465static inline unsigned
466calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
467{
468 unsigned tmp;
469
470	if (widthselect)	/* nominal 3/16 pulse width */
471 return (clockselect) ? 12 : 24;
472
473 tmp = ((clockselect) ? 12 : 24) / (BAUD_BITS(baudrate)+1);
474
475 /* intermediate result of integer division needed here */
476
477 return (tmp>0) ? (tmp-1) : 0;
478}
479
480#define PHYCTL_SIR(br,ws,cs) BWP_TO_PHYCTL(BAUD_BITS(br),calc_width_bits((br),(ws),(cs)),0)
481#define PHYCTL_MIR(cs) BWP_TO_PHYCTL(0,((cs)?9:10),1)
482#define PHYCTL_FIR BWP_TO_PHYCTL(0,0,15)
483
484/* quite ugly, I know. But implementing these calculations here avoids
485 * having magic numbers in the code and allows some playing with pulsewidths
486 * without the risk of violating the standards.
487 * FWIW, here is the table for reference:
488 *
489 * baudrate BAUD min-PLSWID nom-PLSWID PREAMB
490 * 2400 47 0(0) 12(24) 0
491 * 9600 11 0(0) 12(24) 0
492 * 19200 5 1(2) 12(24) 0
493 * 38400 2 3(6) 12(24) 0
494 * 57600 1 5(10) 12(24) 0
495 * 115200 0 11(22) 12(24) 0
496 * MIR 0 - 9(10) 1
497 * FIR 0 - 0 15
498 *
499 * note: x(y) means x-value for 40MHz / y-value for 48MHz primary input clock
500 */
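/*
 * Editor's worked example (informational only): with a 40MHz input clock
 * (clockselect != 0) and minimum pulse width requested (widthselect == 0),
 * PHYCTL_SIR(115200, 0, 1) gives BAUD_BITS(115200) = 0 and
 * calc_width_bits() = 12/(0+1) - 1 = 11, i.e. BAUD=0, PLSWID=11, PREAMB=0,
 * which matches the 115200 row of the table above.
 */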
501
502/* ------------------------------------------ */
503
504
505/* VLSI_PIO_MAXPKT: Maximum Packet Length register (u16, rw) */
506
507/* maximum acceptable length for received packets */
508
509/* hw imposed limitation - register uses only [11:0] */
510#define MAX_PACKET_LENGTH 0x0fff
511
512/* IrLAP I-field (apparently not defined elsewhere) */
513#define IRDA_MTU 2048
514
515/* complete packet consists of A(1)+C(1)+I(<=IRDA_MTU) */
516#define IRLAP_SKB_ALLOCSIZE (1+1+IRDA_MTU)
517
518/* the buffers we use to exchange frames with the hardware need to be
519 * larger than IRLAP_SKB_ALLOCSIZE because we may have up to 4 bytes FCS
520 * appended and, in SIR mode, a lot of frame wrapping bytes. The worst
521 * case appears to be a SIR packet with I-size==IRDA_MTU and all bytes
522 * requiring to be escaped to provide transparency. Furthermore, the peer
523 * might ask for quite a number of additional XBOFs:
524 * up to 115+48 XBOFS 163
525 * regular BOF 1
526 * A-field 1
527 * C-field 1
528 * I-field, IRDA_MTU, all escaped 4096
529 * FCS (16 bit at SIR, escaped) 4
530 * EOF 1
531 * AFAICS nothing in IrLAP guarantees A/C field not to need escaping
532 * (f.e. 0xc0/0xc1 - i.e. BOF/EOF - are legal values there), so with A and C
533 * escaped to 2 bytes each the worst case is 163+1+2+2+4096+4+1 = 4269 bytes.
534 * However, the VLSI uses only 12 bits for all buffer length values,
535 * which limits the maximum usable buffer size to <= 4095.
536 * Note this is not a limitation in the receive case because we use
537 * the SIR filtering mode where the hw unwraps the frame and only the
538 * bare packet+fcs is stored into the buffer - in contrast to the SIR
539 * tx case where we have to pass frame-wrapped packets to the hw.
540 * If this would ever become an issue in real life, the only workaround
541 * I see would be using the legacy UART emulation in SIR mode.
542 */
543
544#define XFER_BUF_SIZE MAX_PACKET_LENGTH
545
546/* ------------------------------------------ */
547
548/* VLSI_PIO_RCVBCNT: Receive Byte Count Register (u16, ro) */
549
550/* receive packet counter gets incremented on every non-filtered
551 * byte which was put in the receive fifo and reset for each
552 * new packet. Used to decide whether we are just in the middle
553 * of receiving
554 */
555
556/* better apply the [11:0] mask when reading, as some docs say the
557 * reserved [15:12] would return 1 when reading - which is wrong AFAICS
558 */
559#define RCVBCNT_MASK 0x0fff
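/*
 * Editor's note: illustrative only. Per the note above, a "still in the
 * middle of receiving?" test would mask the register like this; the helper
 * name is made up and 'iobase' is assumed to be the chip's PCI I/O base.
 */
static inline int vlsi_rx_in_progress_sketch(unsigned iobase)
{
	return (inw(iobase + VLSI_PIO_RCVBCNT) & RCVBCNT_MASK) != 0;
}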
560
561/******************************************************************/
562
563/* descriptors for rx/tx ring
564 *
565 * accessed by hardware - don't change!
566 *
567 * the descriptor is owned by hardware, when the ACTIVE status bit
568 * is set and nothing (besides reading status to test the bit)
569 * shall be done. The bit gets cleared by hw, when the descriptor
570 * gets closed. Premature reaping of descriptors owned by the chip
571 * can be achieved by disabling IRCFG_MSTR
572 *
573 * Attention: Writing addr overwrites status!
574 *
575 * ### FIXME: depends on endianness (but there ain't no non-i586 ob800 ;-)
576 */
577
578struct ring_descr_hw {
579 volatile u16 rd_count; /* tx/rx count [11:0] */
580 u16 reserved;
581 union {
582 u32 addr; /* [23:0] of the buffer's busaddress */
583 struct {
584 u8 addr_res[3];
585 volatile u8 status; /* descriptor status */
586 } rd_s __attribute__((packed));
587	} rd_u __attribute__((packed));
588} __attribute__ ((packed));
589
590#define rd_addr rd_u.addr
591#define rd_status rd_u.rd_s.status
592
593/* ring descriptor status bits */
594
595#define RD_ACTIVE 0x80 /* descriptor owned by hw (both TX,RX) */
596
597/* TX ring descriptor status */
598
599#define RD_TX_DISCRC 0x40 /* do not send CRC (for SIR) */
600#define RD_TX_BADCRC 0x20 /* force a bad CRC */
601#define RD_TX_PULSE 0x10 /* send indication pulse after this frame (MIR/FIR) */
602#define RD_TX_FRCEUND 0x08 /* force underrun */
603#define RD_TX_CLRENTX 0x04 /* clear ENTX after this frame */
604#define RD_TX_UNDRN 0x01 /* TX fifo underrun (probably PCI problem) */
605
606/* RX ring descriptor status */
607
608#define RD_RX_PHYERR 0x40 /* physical encoding error */
609#define RD_RX_CRCERR 0x20 /* CRC error (MIR/FIR) */
610#define RD_RX_LENGTH 0x10 /* frame exceeds buffer length */
611#define RD_RX_OVER 0x08 /* RX fifo overrun (probably PCI problem) */
612#define RD_RX_SIRBAD 0x04 /* EOF missing: BOF follows BOF (SIR, filtered) */
613
614#define RD_RX_ERROR 0x7c /* any error in received frame */
615
616/* the memory required to hold the 2 descriptor rings */
617#define HW_RING_AREA_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_descr_hw))
618
619/******************************************************************/
620
621/* sw-ring descriptors consist of a bus-mapped transfer buffer with an
622 * associated skb and a pointer to the hw entry descriptor
623 */
624
625struct ring_descr {
626 struct ring_descr_hw *hw;
627 struct sk_buff *skb;
628 void *buf;
629};
630
631/* wrappers for operations on hw-exposed ring descriptors
632 * access to the hw-part of the descriptors must use these.
633 */
634
635static inline int rd_is_active(struct ring_descr *rd)
636{
637 return ((rd->hw->rd_status & RD_ACTIVE) != 0);
638}
639
640static inline void rd_activate(struct ring_descr *rd)
641{
642 rd->hw->rd_status |= RD_ACTIVE;
643}
644
645static inline void rd_set_status(struct ring_descr *rd, u8 s)
646{
647 rd->hw->rd_status = s; /* may pass ownership to the hardware */
648}
649
650static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
651{
652 /* order is important for two reasons:
653	 * - overlaid: writing addr overwrites status
654 * - we want to write status last so we have valid address in
655 * case status has RD_ACTIVE set
656 */
657
658 if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
659 IRDA_ERROR("%s: pci busaddr inconsistency!\n", __FUNCTION__);
660 dump_stack();
661 return;
662 }
663
664 a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
665 * to status - just in case MSTRPAGE_VALUE!=0
666 */
667 rd->hw->rd_addr = cpu_to_le32(a);
668 wmb();
669 rd_set_status(rd, s); /* may pass ownership to the hardware */
670}
671
672static inline void rd_set_count(struct ring_descr *rd, u16 c)
673{
674 rd->hw->rd_count = cpu_to_le16(c);
675}
676
677static inline u8 rd_get_status(struct ring_descr *rd)
678{
679 return rd->hw->rd_status;
680}
681
682static inline dma_addr_t rd_get_addr(struct ring_descr *rd)
683{
684 dma_addr_t a;
685
686 a = le32_to_cpu(rd->hw->rd_addr);
687 return (a & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
688}
689
690static inline u16 rd_get_count(struct ring_descr *rd)
691{
692 return le16_to_cpu(rd->hw->rd_count);
693}
694
695/******************************************************************/
696
697/* sw descriptor rings for rx, tx:
698 *
699 * operations follow the producer-consumer paradigm, with the hw
700 * in the middle doing the processing.
701 * ring size must be power of two.
702 *
703 * producer advances r->tail after inserting for processing
704 * consumer advances r->head after removing processed rd
705 * ring is empty if head==tail / full if (tail+1)==head
706 */
707
708struct vlsi_ring {
709 struct pci_dev *pdev;
710 int dir;
711 unsigned len;
712 unsigned size;
713 unsigned mask;
714 atomic_t head, tail;
715 struct ring_descr *rd;
716};
717
718/* ring processing helpers */
719
720static inline struct ring_descr *ring_last(struct vlsi_ring *r)
721{
722 int t;
723
724 t = atomic_read(&r->tail) & r->mask;
725 return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t];
726}
727
728static inline struct ring_descr *ring_put(struct vlsi_ring *r)
729{
730 atomic_inc(&r->tail);
731 return ring_last(r);
732}
733
734static inline struct ring_descr *ring_first(struct vlsi_ring *r)
735{
736 int h;
737
738 h = atomic_read(&r->head) & r->mask;
739 return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h];
740}
741
742static inline struct ring_descr *ring_get(struct vlsi_ring *r)
743{
744 atomic_inc(&r->head);
745 return ring_first(r);
746}
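/*
 * Editor's note: illustrative sketch only (names made up, locking and error
 * handling omitted) of the producer side of the scheme above: grab the next
 * free descriptor, fill it, pass ownership to the hardware, then advance the
 * tail. A write to VLSI_PIO_PROMPT would afterwards kick the ring state
 * machine (see the PROMPT description above).
 */
static inline int ring_produce_sketch(struct vlsi_ring *r, dma_addr_t buf, u16 len)
{
	struct ring_descr *rd = ring_last(r);	/* next free slot, NULL if full */

	if (!rd)
		return -1;
	rd_set_count(rd, len);
	rd_set_addr_status(rd, buf, RD_ACTIVE);	/* hand descriptor to the hw */
	ring_put(r);				/* producer advances tail */
	return 0;
}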
747
748/******************************************************************/
749
750/* our private compound VLSI-PCI-IRDA device information */
751
752typedef struct vlsi_irda_dev {
753 struct pci_dev *pdev;
754 struct net_device_stats stats;
755
756 struct irlap_cb *irlap;
757
758 struct qos_info qos;
759
760 unsigned mode;
761 int baud, new_baud;
762
763 dma_addr_t busaddr;
764 void *virtaddr;
765 struct vlsi_ring *tx_ring, *rx_ring;
766
767 struct timeval last_rx;
768
769 spinlock_t lock;
770 struct semaphore sem;
771
772 u8 resume_ok;
773 struct proc_dir_entry *proc_entry;
774
775} vlsi_irda_dev_t;
776
777/********************************************************/
778
779/* the remapped error flags we use for returning from frame
780 * post-processing in vlsi_process_tx/rx() after it was completed
781 * by the hardware. These functions either return the >=0 number
782 * of transferred bytes in case of success or the negative (-)
783 * of the or'ed error flags.
784 */
785
786#define VLSI_TX_DROP 0x0001
787#define VLSI_TX_FIFO 0x0002
788
789#define VLSI_RX_DROP 0x0100
790#define VLSI_RX_OVER 0x0200
791#define VLSI_RX_LENGTH 0x0400
792#define VLSI_RX_FRAME 0x0800
793#define VLSI_RX_CRC 0x1000
794
795/********************************************************/
796
797#endif /* IRDA_VLSI_FIR_H */
798
diff --git a/drivers/net/irda/w83977af.h b/drivers/net/irda/w83977af.h
new file mode 100644
index 000000000000..04476c2e9121
--- /dev/null
+++ b/drivers/net/irda/w83977af.h
@@ -0,0 +1,53 @@
1#ifndef W83977AF_H
2#define W83977AF_H
3
4#define W977_EFIO_BASE 0x370
5#define W977_EFIO2_BASE 0x3f0
6#define W977_DEVICE_IR 0x06
7
8
9/*
10 * Enter extended function mode
11 */
12static inline void w977_efm_enter(unsigned int efio)
13{
14 outb(0x87, efio);
15 outb(0x87, efio);
16}
17
18/*
19 * Select a device to configure
20 */
21
22static inline void w977_select_device(__u8 devnum, unsigned int efio)
23{
24 outb(0x07, efio);
25 outb(devnum, efio+1);
26}
27
28/*
29 * Write a byte to a register
30 */
31static inline void w977_write_reg(__u8 reg, __u8 value, unsigned int efio)
32{
33 outb(reg, efio);
34 outb(value, efio+1);
35}
36
37/*
38 * Read a byte from a register
39 */
40static inline __u8 w977_read_reg(__u8 reg, unsigned int efio)
41{
42 outb(reg, efio);
43 return inb(efio+1);
44}
45
46/*
47 * Exit extended function mode
48 */
49static inline void w977_efm_exit(unsigned int efio)
50{
51 outb(0xAA, efio);
52}
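/*
 * Editor's note: hypothetical usage sketch, not part of the driver, showing
 * the typical extended-function-mode sequence built from the helpers above;
 * register 0x30 is the activate/deactivate register used elsewhere in this
 * driver and the helper name is made up.
 */
static inline void w977_set_ir_active_sketch(unsigned int efio, __u8 on)
{
	w977_efm_enter(efio);				/* enter config mode */
	w977_select_device(W977_DEVICE_IR, efio);	/* select the IR logical device */
	w977_write_reg(0x30, on ? 0x01 : 0x00, efio);	/* activate or deactivate */
	w977_efm_exit(efio);				/* leave config mode */
}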
53#endif
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
new file mode 100644
index 000000000000..0ea65c4c6f85
--- /dev/null
+++ b/drivers/net/irda/w83977af_ir.c
@@ -0,0 +1,1379 @@
1/*********************************************************************
2 *
3 * Filename: w83977af_ir.c
4 * Version: 1.0
5 * Description: FIR driver for the Winbond W83977AF Super I/O chip
6 * Status: Experimental.
7 * Author: Paul VanderSpek
8 * Created at: Wed Nov 4 11:46:16 1998
9 * Modified at: Fri Jan 28 12:10:59 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
13 * Copyright (c) 1998-1999 Rebel.com
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of
18 * the License, or (at your option) any later version.
19 *
20 * Neither Paul VanderSpek nor Rebel.com admit liability nor provide
21 * warranty for any of this software. This material is provided "AS-IS"
22 * and at no charge.
23 *
24 * If you find bugs in this file, it's very likely that the same bug
25 * will also be in pc87108.c since the implementations are quite
26 * similar.
27 *
28 * Notice that all functions that need to access the chip in _any_
29 * way, must save BSR register on entry, and restore it on exit.
30 * It is _very_ important to follow this policy!
31 *
32 * __u8 bank;
33 *
34 * bank = inb( iobase+BSR);
35 *
36 * do_your_stuff_here();
37 *
38 * outb( bank, iobase+BSR);
39 *
40 ********************************************************************/
41
42#include <linux/module.h>
43#include <linux/config.h>
44#include <linux/kernel.h>
45#include <linux/types.h>
46#include <linux/skbuff.h>
47#include <linux/netdevice.h>
48#include <linux/ioport.h>
49#include <linux/delay.h>
50#include <linux/slab.h>
51#include <linux/init.h>
52#include <linux/rtnetlink.h>
53#include <linux/dma-mapping.h>
54
55#include <asm/io.h>
56#include <asm/dma.h>
57#include <asm/byteorder.h>
58
59#include <net/irda/irda.h>
60#include <net/irda/wrapper.h>
61#include <net/irda/irda_device.h>
62#include "w83977af.h"
63#include "w83977af_ir.h"
64
65#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
66#undef CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
67#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
68#endif
69#undef CONFIG_USE_INTERNAL_TIMER /* Just cannot make that timer work */
70#define CONFIG_USE_W977_PNP /* Currently needed */
71#define PIO_MAX_SPEED 115200
72
73static char *driver_name = "w83977af_ir";
74static int qos_mtt_bits = 0x07; /* 1 ms or more */
75
76#define CHIP_IO_EXTENT 8
77
78static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
79#ifdef CONFIG_ARCH_NETWINDER /* Adjust to NetWinder differences */
80static unsigned int irq[] = { 6, 0, 0, 0 };
81#else
82static unsigned int irq[] = { 11, 0, 0, 0 };
83#endif
84static unsigned int dma[] = { 1, 0, 0, 0 };
85static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
86static unsigned int efio = W977_EFIO_BASE;
87
88static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL};
89
90/* Some prototypes */
91static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
92 unsigned int dma);
93static int w83977af_close(struct w83977af_ir *self);
94static int w83977af_probe(int iobase, int irq, int dma);
95static int w83977af_dma_receive(struct w83977af_ir *self);
96static int w83977af_dma_receive_complete(struct w83977af_ir *self);
97static int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev);
98static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
99static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
100static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
101static int w83977af_is_receiving(struct w83977af_ir *self);
102
103static int w83977af_net_open(struct net_device *dev);
104static int w83977af_net_close(struct net_device *dev);
105static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
106static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);
107
108/*
109 * Function w83977af_init ()
110 *
111 * Initialize chip. Just try to find out how many chips we are dealing with
112 * and where they are
113 */
114static int __init w83977af_init(void)
115{
116 int i;
117
118 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
119
120	for (i=0; (i < 4) && (io[i] < 2000); i++) {
121 if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
122 return 0;
123 }
124 return -ENODEV;
125}
126
127/*
128 * Function w83977af_cleanup ()
129 *
130 * Close all configured chips
131 *
132 */
133static void __exit w83977af_cleanup(void)
134{
135 int i;
136
137 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
138
139 for (i=0; i < 4; i++) {
140 if (dev_self[i])
141 w83977af_close(dev_self[i]);
142 }
143}
144
145/*
146 * Function w83977af_open (iobase, irq)
147 *
148 * Open driver instance
149 *
150 */
151int w83977af_open(int i, unsigned int iobase, unsigned int irq,
152 unsigned int dma)
153{
154 struct net_device *dev;
155 struct w83977af_ir *self;
156 int err;
157
158 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
159
160 /* Lock the port that we need */
161 if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
162 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
163 __FUNCTION__ , iobase);
164 return -ENODEV;
165 }
166
167 if (w83977af_probe(iobase, irq, dma) == -1) {
168 err = -1;
169 goto err_out;
170 }
171 /*
172 * Allocate new instance of the driver
173 */
174 dev = alloc_irdadev(sizeof(struct w83977af_ir));
175 if (dev == NULL) {
176 printk( KERN_ERR "IrDA: Can't allocate memory for "
177 "IrDA control block!\n");
178 err = -ENOMEM;
179 goto err_out;
180 }
181
182 self = dev->priv;
183 spin_lock_init(&self->lock);
184
185
186 /* Initialize IO */
187 self->io.fir_base = iobase;
188 self->io.irq = irq;
189 self->io.fir_ext = CHIP_IO_EXTENT;
190 self->io.dma = dma;
191 self->io.fifo_size = 32;
192
193 /* Initialize QoS for this device */
194 irda_init_max_qos_capabilies(&self->qos);
195
196	/* The only value we must override is the baudrate */
197
198	/* FIXME: The HP HSDL-1100 does not support 1152000! */
199 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
200 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
201
202	/* The HP HSDL-1100 needs 1 ms according to the specs */
203 self->qos.min_turn_time.bits = qos_mtt_bits;
204 irda_qos_bits_to_value(&self->qos);
205
206 /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
207 self->rx_buff.truesize = 14384;
208 self->tx_buff.truesize = 4000;
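	/* Editor's note (informational): 14384 apparently instantiates the
	 * formula above with data_size = 2048 (the maximum IrDA data size)
	 * and a window of 7: (2048 + 6) * 7 + 6 = 14384.
	 */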
209
210 /* Allocate memory if needed */
211 self->rx_buff.head =
212 dma_alloc_coherent(NULL, self->rx_buff.truesize,
213 &self->rx_buff_dma, GFP_KERNEL);
214 if (self->rx_buff.head == NULL) {
215 err = -ENOMEM;
216 goto err_out1;
217 }
218
219 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
220
221 self->tx_buff.head =
222 dma_alloc_coherent(NULL, self->tx_buff.truesize,
223 &self->tx_buff_dma, GFP_KERNEL);
224 if (self->tx_buff.head == NULL) {
225 err = -ENOMEM;
226 goto err_out2;
227 }
228 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
229
230 self->rx_buff.in_frame = FALSE;
231 self->rx_buff.state = OUTSIDE_FRAME;
232 self->tx_buff.data = self->tx_buff.head;
233 self->rx_buff.data = self->rx_buff.head;
234 self->netdev = dev;
235
236 /* Keep track of module usage */
237 SET_MODULE_OWNER(dev);
238
239 /* Override the network functions we need to use */
240 dev->hard_start_xmit = w83977af_hard_xmit;
241 dev->open = w83977af_net_open;
242 dev->stop = w83977af_net_close;
243 dev->do_ioctl = w83977af_net_ioctl;
244 dev->get_stats = w83977af_net_get_stats;
245
246 err = register_netdev(dev);
247 if (err) {
248 IRDA_ERROR("%s(), register_netdevice() failed!\n", __FUNCTION__);
249 goto err_out3;
250 }
251 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
252
253 /* Need to store self somewhere */
254 dev_self[i] = self;
255
256 return 0;
257err_out3:
258 dma_free_coherent(NULL, self->tx_buff.truesize,
259 self->tx_buff.head, self->tx_buff_dma);
260err_out2:
261 dma_free_coherent(NULL, self->rx_buff.truesize,
262 self->rx_buff.head, self->rx_buff_dma);
263err_out1:
264 free_netdev(dev);
265err_out:
266 release_region(iobase, CHIP_IO_EXTENT);
267 return err;
268}
269
270/*
271 * Function w83977af_close (self)
272 *
273 * Close driver instance
274 *
275 */
276static int w83977af_close(struct w83977af_ir *self)
277{
278 int iobase;
279
280 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
281
282 iobase = self->io.fir_base;
283
284#ifdef CONFIG_USE_W977_PNP
285 /* enter PnP configuration mode */
286 w977_efm_enter(efio);
287
288 w977_select_device(W977_DEVICE_IR, efio);
289
290 /* Deactivate device */
291 w977_write_reg(0x30, 0x00, efio);
292
293 w977_efm_exit(efio);
294#endif /* CONFIG_USE_W977_PNP */
295
296 /* Remove netdevice */
297 unregister_netdev(self->netdev);
298
299 /* Release the PORT that this driver is using */
300 IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
301 __FUNCTION__ , self->io.fir_base);
302 release_region(self->io.fir_base, self->io.fir_ext);
303
304 if (self->tx_buff.head)
305 dma_free_coherent(NULL, self->tx_buff.truesize,
306 self->tx_buff.head, self->tx_buff_dma);
307
308 if (self->rx_buff.head)
309 dma_free_coherent(NULL, self->rx_buff.truesize,
310 self->rx_buff.head, self->rx_buff_dma);
311
312 free_netdev(self->netdev);
313
314 return 0;
315}
316
317int w83977af_probe( int iobase, int irq, int dma)
318{
319 int version;
320 int i;
321
322 for (i=0; i < 2; i++) {
323 IRDA_DEBUG( 0, "%s()\n", __FUNCTION__ );
324#ifdef CONFIG_USE_W977_PNP
325 /* Enter PnP configuration mode */
326 w977_efm_enter(efbase[i]);
327
328 w977_select_device(W977_DEVICE_IR, efbase[i]);
329
330 /* Configure PnP port, IRQ, and DMA channel */
331 w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
332 w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
333
334 w977_write_reg(0x70, irq, efbase[i]);
335#ifdef CONFIG_ARCH_NETWINDER
336 /* Netwinder uses 1 higher than Linux */
337 w977_write_reg(0x74, dma+1, efbase[i]);
338#else
339 w977_write_reg(0x74, dma, efbase[i]);
340#endif /*CONFIG_ARCH_NETWINDER */
341 w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */
342
343 /* Set append hardware CRC, enable IR bank selection */
344 w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
345
346 /* Activate device */
347 w977_write_reg(0x30, 0x01, efbase[i]);
348
349 w977_efm_exit(efbase[i]);
350#endif /* CONFIG_USE_W977_PNP */
351 /* Disable Advanced mode */
352 switch_bank(iobase, SET2);
353		outb(0x00, iobase+2);
354
355 /* Turn on UART (global) interrupts */
356 switch_bank(iobase, SET0);
357 outb(HCR_EN_IRQ, iobase+HCR);
358
359 /* Switch to advanced mode */
360 switch_bank(iobase, SET2);
361 outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
362
363 /* Set default IR-mode */
364 switch_bank(iobase, SET0);
365 outb(HCR_SIR, iobase+HCR);
366
367 /* Read the Advanced IR ID */
368 switch_bank(iobase, SET3);
369 version = inb(iobase+AUID);
370
371 /* Should be 0x1? */
372 if (0x10 == (version & 0xf0)) {
373 efio = efbase[i];
374
375 /* Set FIFO size to 32 */
376 switch_bank(iobase, SET2);
377 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
378
379 /* Set FIFO threshold to TX17, RX16 */
380 switch_bank(iobase, SET0);
381 outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
382 UFR_EN_FIFO,iobase+UFR);
383
384 /* Receiver frame length */
385 switch_bank(iobase, SET4);
386 outb(2048 & 0xff, iobase+6);
387 outb((2048 >> 8) & 0x1f, iobase+7);
388
389 /*
390 * Init HP HSDL-1100 transceiver.
391 *
392			 * Set IRX_MSL since we have 2 receive paths, IRRX
393			 * and IRRXH. Clear IRSL0D since we want IRSL0 to
394			 * be an input pin used for IRRXH
395 *
396 * IRRX pin 37 connected to receiver
397 * IRTX pin 38 connected to transmitter
398 * FIRRX pin 39 connected to receiver (IRSL0)
399 * CIRRX pin 40 connected to pin 37
400 */
401 switch_bank(iobase, SET7);
402 outb(0x40, iobase+7);
403
404 IRDA_MESSAGE("W83977AF (IR) driver loaded. "
405 "Version: 0x%02x\n", version);
406
407 return 0;
408 } else {
409			/* Try next extended function register address */
410 IRDA_DEBUG( 0, "%s(), Wrong chip version", __FUNCTION__ );
411 }
412 }
413 return -1;
414}
415
416void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
417{
418 int ir_mode = HCR_SIR;
419 int iobase;
420 __u8 set;
421
422 iobase = self->io.fir_base;
423
424 /* Update accounting for new speed */
425 self->io.speed = speed;
426
427 /* Save current bank */
428 set = inb(iobase+SSR);
429
430 /* Disable interrupts */
431 switch_bank(iobase, SET0);
432 outb(0, iobase+ICR);
433
434 /* Select Set 2 */
435 switch_bank(iobase, SET2);
436 outb(0x00, iobase+ABHL);
437
438 switch (speed) {
439 case 9600: outb(0x0c, iobase+ABLL); break;
440 case 19200: outb(0x06, iobase+ABLL); break;
441 case 38400: outb(0x03, iobase+ABLL); break;
442 case 57600: outb(0x02, iobase+ABLL); break;
443 case 115200: outb(0x01, iobase+ABLL); break;
444 case 576000:
445 ir_mode = HCR_MIR_576;
446 IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __FUNCTION__ );
447 break;
448 case 1152000:
449 ir_mode = HCR_MIR_1152;
450 IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __FUNCTION__ );
451 break;
452 case 4000000:
453 ir_mode = HCR_FIR;
454 IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __FUNCTION__ );
455 break;
456 default:
457 ir_mode = HCR_FIR;
458 IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __FUNCTION__ , speed);
459 break;
460 }
461
462 /* Set speed mode */
463 switch_bank(iobase, SET0);
464 outb(ir_mode, iobase+HCR);
465
466 /* set FIFO size to 32 */
467 switch_bank(iobase, SET2);
468 outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
469
470 /* set FIFO threshold to TX17, RX16 */
471 switch_bank(iobase, SET0);
472 outb(0x00, iobase+UFR); /* Reset */
473 outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
474 outb(0xa7, iobase+UFR);
475
476 netif_wake_queue(self->netdev);
477
478 /* Enable some interrupts so we can receive frames */
479 switch_bank(iobase, SET0);
480 if (speed > PIO_MAX_SPEED) {
481 outb(ICR_EFSFI, iobase+ICR);
482 w83977af_dma_receive(self);
483 } else
484 outb(ICR_ERBRI, iobase+ICR);
485
486 /* Restore SSR */
487 outb(set, iobase+SSR);
488}
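/*
 * Editor's note (informational): the ABLL divisor values written above are
 * simply 115200/baudrate, e.g. 115200/9600 = 12 = 0x0c and 115200/115200 = 1 = 0x01.
 */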
489
490/*
491 * Function w83977af_hard_xmit (skb, dev)
492 *
493 * Sets up a DMA transfer to send the current frame.
494 *
495 */
496int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
497{
498 struct w83977af_ir *self;
499 __s32 speed;
500 int iobase;
501 __u8 set;
502 int mtt;
503
504 self = (struct w83977af_ir *) dev->priv;
505
506 iobase = self->io.fir_base;
507
508 IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __FUNCTION__ , jiffies,
509 (int) skb->len);
510
511 /* Lock transmit buffer */
512 netif_stop_queue(dev);
513
514 /* Check if we need to change the speed */
515 speed = irda_get_next_speed(skb);
516 if ((speed != self->io.speed) && (speed != -1)) {
517 /* Check for empty frame */
518 if (!skb->len) {
519 w83977af_change_speed(self, speed);
520 dev->trans_start = jiffies;
521 dev_kfree_skb(skb);
522 return 0;
523 } else
524 self->new_speed = speed;
525 }
526
527 /* Save current set */
528 set = inb(iobase+SSR);
529
530 /* Decide if we should use PIO or DMA transfer */
531 if (self->io.speed > PIO_MAX_SPEED) {
532 self->tx_buff.data = self->tx_buff.head;
533 memcpy(self->tx_buff.data, skb->data, skb->len);
534 self->tx_buff.len = skb->len;
535
536 mtt = irda_get_mtt(skb);
537#ifdef CONFIG_USE_INTERNAL_TIMER
538 if (mtt > 50) {
539 /* Adjust for timer resolution */
540 mtt /= 1000+1;
541
542 /* Setup timer */
543 switch_bank(iobase, SET4);
544 outb(mtt & 0xff, iobase+TMRL);
545 outb((mtt >> 8) & 0x0f, iobase+TMRH);
546
547 /* Start timer */
548 outb(IR_MSL_EN_TMR, iobase+IR_MSL);
549 self->io.direction = IO_XMIT;
550
551 /* Enable timer interrupt */
552 switch_bank(iobase, SET0);
553 outb(ICR_ETMRI, iobase+ICR);
554 } else {
555#endif
556 IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __FUNCTION__ , jiffies, mtt);
557 if (mtt)
558 udelay(mtt);
559
560 /* Enable DMA interrupt */
561 switch_bank(iobase, SET0);
562 outb(ICR_EDMAI, iobase+ICR);
563 w83977af_dma_write(self, iobase);
564#ifdef CONFIG_USE_INTERNAL_TIMER
565 }
566#endif
567 } else {
568 self->tx_buff.data = self->tx_buff.head;
569 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
570 self->tx_buff.truesize);
571
572 /* Add interrupt on tx low level (will fire immediately) */
573 switch_bank(iobase, SET0);
574 outb(ICR_ETXTHI, iobase+ICR);
575 }
576 dev->trans_start = jiffies;
577 dev_kfree_skb(skb);
578
579 /* Restore set register */
580 outb(set, iobase+SSR);
581
582 return 0;
583}
584
585/*
586 * Function w83977af_dma_write (self, iobase)
587 *
588 * Send frame using DMA
589 *
590 */
591static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
592{
593 __u8 set;
594#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
595 unsigned long flags;
596 __u8 hcr;
597#endif
598 IRDA_DEBUG(4, "%s(), len=%d\n", __FUNCTION__ , self->tx_buff.len);
599
600 /* Save current set */
601 set = inb(iobase+SSR);
602
603 /* Disable DMA */
604 switch_bank(iobase, SET0);
605 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
606
607 /* Choose transmit DMA channel */
608 switch_bank(iobase, SET2);
609 outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
610#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
611 spin_lock_irqsave(&self->lock, flags);
612
613 disable_dma(self->io.dma);
614 clear_dma_ff(self->io.dma);
615 set_dma_mode(self->io.dma, DMA_MODE_READ);
616 set_dma_addr(self->io.dma, self->tx_buff_dma);
617 set_dma_count(self->io.dma, self->tx_buff.len);
618#else
619 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
620 DMA_MODE_WRITE);
621#endif
622 self->io.direction = IO_XMIT;
623
624 /* Enable DMA */
625 switch_bank(iobase, SET0);
626#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
627 hcr = inb(iobase+HCR);
628 outb(hcr | HCR_EN_DMA, iobase+HCR);
629 enable_dma(self->io.dma);
630 spin_unlock_irqrestore(&self->lock, flags);
631#else
632 outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
633#endif
634
635 /* Restore set register */
636 outb(set, iobase+SSR);
637}
638
639/*
640 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
641 *
642 *    Fill the transmit FIFO with data from the frame buffer using PIO
643 *    and return the number of bytes actually written
644 */
645static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
646{
647 int actual = 0;
648 __u8 set;
649
650 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
651
652 /* Save current bank */
653 set = inb(iobase+SSR);
654
655 switch_bank(iobase, SET0);
656 if (!(inb_p(iobase+USR) & USR_TSRE)) {
657 IRDA_DEBUG(4,
658 "%s(), warning, FIFO not empty yet!\n", __FUNCTION__ );
659
660 fifo_size -= 17;
661 IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
662 __FUNCTION__ , fifo_size);
663 }
664
665 /* Fill FIFO with current frame */
666 while ((fifo_size-- > 0) && (actual < len)) {
667 /* Transmit next byte */
668 outb(buf[actual++], iobase+TBR);
669 }
670
671 IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
672 __FUNCTION__ , fifo_size, actual, len);
673
674 /* Restore bank */
675 outb(set, iobase+SSR);
676
677 return actual;
678}
679
680/*
681 * Function w83977af_dma_xmit_complete (self)
682 *
683 * The transfer of a frame is finished. So do the necessary things
684 *
685 *
686 */
687static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
688{
689 int iobase;
690 __u8 set;
691
692 IRDA_DEBUG(4, "%s(%ld)\n", __FUNCTION__ , jiffies);
693
694 IRDA_ASSERT(self != NULL, return;);
695
696 iobase = self->io.fir_base;
697
698 /* Save current set */
699 set = inb(iobase+SSR);
700
701 /* Disable DMA */
702 switch_bank(iobase, SET0);
703 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
704
705	/* Check for underrun! */
706 if (inb(iobase+AUDR) & AUDR_UNDR) {
707 IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __FUNCTION__ );
708
709 self->stats.tx_errors++;
710 self->stats.tx_fifo_errors++;
711
712 /* Clear bit, by writing 1 to it */
713 outb(AUDR_UNDR, iobase+AUDR);
714 } else
715 self->stats.tx_packets++;
716
717
718 if (self->new_speed) {
719 w83977af_change_speed(self, self->new_speed);
720 self->new_speed = 0;
721 }
722
723 /* Unlock tx_buff and request another frame */
724 /* Tell the network layer, that we want more frames */
725 netif_wake_queue(self->netdev);
726
727 /* Restore set */
728 outb(set, iobase+SSR);
729}
730
731/*
732 * Function w83977af_dma_receive (self)
733 *
734 * Get ready for receiving a frame. The device will initiate a DMA
735 * if it starts to receive a frame.
736 *
737 */
738int w83977af_dma_receive(struct w83977af_ir *self)
739{
740 int iobase;
741 __u8 set;
742#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
743 unsigned long flags;
744 __u8 hcr;
745#endif
746 IRDA_ASSERT(self != NULL, return -1;);
747
748 IRDA_DEBUG(4, "%s\n", __FUNCTION__ );
749
750 iobase= self->io.fir_base;
751
752 /* Save current set */
753 set = inb(iobase+SSR);
754
755 /* Disable DMA */
756 switch_bank(iobase, SET0);
757 outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
758
759 /* Choose DMA Rx, DMA Fairness, and Advanced mode */
760 switch_bank(iobase, SET2);
761 outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
762 iobase+ADCR1);
763
764 self->io.direction = IO_RECV;
765 self->rx_buff.data = self->rx_buff.head;
766
767#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
768 spin_lock_irqsave(&self->lock, flags);
769
770 disable_dma(self->io.dma);
771 clear_dma_ff(self->io.dma);
772 set_dma_mode(self->io.dma, DMA_MODE_READ);
773 set_dma_addr(self->io.dma, self->rx_buff_dma);
774 set_dma_count(self->io.dma, self->rx_buff.truesize);
775#else
776 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
777 DMA_MODE_READ);
778#endif
779 /*
780 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
781 * important that we don't reset the Tx FIFO since it might not
782 * be finished transmitting yet
783 */
784 switch_bank(iobase, SET0);
785 outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
786 self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
787
788 /* Enable DMA */
789 switch_bank(iobase, SET0);
790#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
791 hcr = inb(iobase+HCR);
792 outb(hcr | HCR_EN_DMA, iobase+HCR);
793 enable_dma(self->io.dma);
794 spin_unlock_irqrestore(&self->lock, flags);
795#else
796 outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
797#endif
798 /* Restore set */
799 outb(set, iobase+SSR);
800
801 return 0;
802}
803
804/*
805 * Function w83977af_receive_complete (self)
806 *
807 * Finished with receiving a frame
808 *
809 */
810int w83977af_dma_receive_complete(struct w83977af_ir *self)
811{
812 struct sk_buff *skb;
813 struct st_fifo *st_fifo;
814 int len;
815 int iobase;
816 __u8 set;
817 __u8 status;
818
819 IRDA_DEBUG(4, "%s\n", __FUNCTION__ );
820
821 st_fifo = &self->st_fifo;
822
823 iobase = self->io.fir_base;
824
825 /* Save current set */
826 set = inb(iobase+SSR);
827
830 /* Read status FIFO */
831 switch_bank(iobase, SET5);
832 while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
833 st_fifo->entries[st_fifo->tail].status = status;
834
835 st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
836 st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
837
838 st_fifo->tail++;
839 st_fifo->len++;
840 }
841
842 while (st_fifo->len) {
843 /* Get first entry */
844 status = st_fifo->entries[st_fifo->head].status;
845 len = st_fifo->entries[st_fifo->head].len;
846 st_fifo->head++;
847 st_fifo->len--;
848
849 /* Check for errors */
850 if (status & FS_FO_ERR_MSK) {
851 if (status & FS_FO_LST_FR) {
852 /* Add number of lost frames to stats */
853 self->stats.rx_errors += len;
854 } else {
855 /* Skip frame */
856 self->stats.rx_errors++;
857
858 self->rx_buff.data += len;
859
860 if (status & FS_FO_MX_LEX)
861 self->stats.rx_length_errors++;
862
863 if (status & FS_FO_PHY_ERR)
864 self->stats.rx_frame_errors++;
865
866 if (status & FS_FO_CRC_ERR)
867 self->stats.rx_crc_errors++;
868 }
869 /* The errors below can be reported in both cases */
870 if (status & FS_FO_RX_OV)
871 self->stats.rx_fifo_errors++;
872
873 if (status & FS_FO_FSF_OV)
874 self->stats.rx_fifo_errors++;
875
876 } else {
877 /* Check if we have transferred all data to memory */
878 switch_bank(iobase, SET0);
879 if (inb(iobase+USR) & USR_RDR) {
880#ifdef CONFIG_USE_INTERNAL_TIMER
881 /* Put this entry back in fifo */
882 st_fifo->head--;
883 st_fifo->len++;
884 st_fifo->entries[st_fifo->head].status = status;
885 st_fifo->entries[st_fifo->head].len = len;
886
887 /* Restore set register */
888 outb(set, iobase+SSR);
889
890 return FALSE; /* I'll be back! */
891#else
892 udelay(80); /* Should be enough!? */
893#endif
894 }
895
896 skb = dev_alloc_skb(len+1);
897 if (skb == NULL) {
898 printk(KERN_INFO
899 "%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
900 /* Restore set register */
901 outb(set, iobase+SSR);
902
903 return FALSE;
904 }
905
906 /* Align to 20 bytes */
907 skb_reserve(skb, 1);
908
909			/* Copy frame without the trailing FCS: 16-bit CRC below 4 Mb/s, 32-bit CRC at 4 Mb/s */
910 if (self->io.speed < 4000000) {
911 skb_put(skb, len-2);
912 memcpy(skb->data, self->rx_buff.data, len-2);
913 } else {
914 skb_put(skb, len-4);
915 memcpy(skb->data, self->rx_buff.data, len-4);
916 }
917
918 /* Move to next frame */
919 self->rx_buff.data += len;
920 self->stats.rx_packets++;
921
922 skb->dev = self->netdev;
923 skb->mac.raw = skb->data;
924 skb->protocol = htons(ETH_P_IRDA);
925 netif_rx(skb);
926 self->netdev->last_rx = jiffies;
927 }
928 }
929 /* Restore set register */
930 outb(set, iobase+SSR);
931
932 return TRUE;
933}
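/*
 * Note (added, not in the original source): the receive-complete path
 * first drains the hardware frame-status FIFO in bank 5 (FS_FO plus the
 * RFLFL/RFLFH length registers) into the driver's software st_fifo, and
 * only then copies the frames out of rx_buff, presumably so the small
 * hardware FIFO is emptied quickly while further frames may still be
 * arriving.
 */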
934
935/*
936 * Function w83977af_pio_receive (self)
937 *
938 * Receive all data in receiver FIFO
939 *
940 */
941static void w83977af_pio_receive(struct w83977af_ir *self)
942{
943 __u8 byte = 0x00;
944 int iobase;
945
946 IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
947
948 IRDA_ASSERT(self != NULL, return;);
949
950 iobase = self->io.fir_base;
951
952 /* Receive all characters in Rx FIFO */
953 do {
954 byte = inb(iobase+RBR);
955 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
956 byte);
957 } while (inb(iobase+USR) & USR_RDR); /* Data available */
958}
959
960/*
961 * Function w83977af_sir_interrupt (self, isr)
962 *
963 * Handle SIR interrupt
964 *
965 */
966static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
967{
968 int actual;
969 __u8 new_icr = 0;
970 __u8 set;
971 int iobase;
972
973 IRDA_DEBUG(4, "%s(), isr=%#x\n", __FUNCTION__ , isr);
974
975 iobase = self->io.fir_base;
976 /* Transmit FIFO low on data */
977 if (isr & ISR_TXTH_I) {
978 /* Write data left in transmit buffer */
979 actual = w83977af_pio_write(self->io.fir_base,
980 self->tx_buff.data,
981 self->tx_buff.len,
982 self->io.fifo_size);
983
984 self->tx_buff.data += actual;
985 self->tx_buff.len -= actual;
986
987 self->io.direction = IO_XMIT;
988
989 /* Check if finished */
990 if (self->tx_buff.len > 0) {
991 new_icr |= ICR_ETXTHI;
992 } else {
993 set = inb(iobase+SSR);
994 switch_bank(iobase, SET0);
995 outb(AUDR_SFEND, iobase+AUDR);
996 outb(set, iobase+SSR);
997
998 self->stats.tx_packets++;
999
1000 /* Feed me more packets */
1001 netif_wake_queue(self->netdev);
1002 new_icr |= ICR_ETBREI;
1003 }
1004 }
1005 /* Check if transmission has completed */
1006 if (isr & ISR_TXEMP_I) {
1007		/* Check if we need to change the speed */
1008 if (self->new_speed) {
1009 IRDA_DEBUG(2,
1010 "%s(), Changing speed!\n", __FUNCTION__ );
1011 w83977af_change_speed(self, self->new_speed);
1012 self->new_speed = 0;
1013 }
1014
1015 /* Turn around and get ready to receive some data */
1016 self->io.direction = IO_RECV;
1017 new_icr |= ICR_ERBRI;
1018 }
1019
1020 /* Rx FIFO threshold or timeout */
1021 if (isr & ISR_RXTH_I) {
1022 w83977af_pio_receive(self);
1023
1024 /* Keep receiving */
1025 new_icr |= ICR_ERBRI;
1026 }
1027 return new_icr;
1028}
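/*
 * Note (added, not in the original source): at SIR speeds data is moved
 * by programmed I/O (w83977af_pio_write / w83977af_pio_receive), while
 * the FIR handler below relies on DMA; that difference is why the two
 * interrupt paths are kept separate and selected by speed in
 * w83977af_interrupt().
 */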
1029
1030/*
1031 * Function w83977af_fir_interrupt (self, isr)
1032 *
1033 * Handle MIR/FIR interrupt
1034 *
1035 */
1036static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
1037{
1038 __u8 new_icr = 0;
1039 __u8 set;
1040 int iobase;
1041
1042 iobase = self->io.fir_base;
1043 set = inb(iobase+SSR);
1044
1045 /* End of frame detected in FIFO */
1046 if (isr & (ISR_FEND_I|ISR_FSF_I)) {
1047 if (w83977af_dma_receive_complete(self)) {
1048
1049 /* Wait for next status FIFO interrupt */
1050 new_icr |= ICR_EFSFI;
1051 } else {
1052 /* DMA not finished yet */
1053
1054 /* Set timer value, resolution 1 ms */
1055 switch_bank(iobase, SET4);
1056 outb(0x01, iobase+TMRL); /* 1 ms */
1057 outb(0x00, iobase+TMRH);
1058
1059 /* Start timer */
1060 outb(IR_MSL_EN_TMR, iobase+IR_MSL);
1061
1062 new_icr |= ICR_ETMRI;
1063 }
1064 }
1065 /* Timer finished */
1066 if (isr & ISR_TMR_I) {
1067 /* Disable timer */
1068 switch_bank(iobase, SET4);
1069 outb(0, iobase+IR_MSL);
1070
1071 /* Clear timer event */
1072 /* switch_bank(iobase, SET0); */
1073/* outb(ASCR_CTE, iobase+ASCR); */
1074
1075 /* Check if this is a TX timer interrupt */
1076 if (self->io.direction == IO_XMIT) {
1077 w83977af_dma_write(self, iobase);
1078
1079 new_icr |= ICR_EDMAI;
1080 } else {
1081 /* Check if DMA has now finished */
1082 w83977af_dma_receive_complete(self);
1083
1084 new_icr |= ICR_EFSFI;
1085 }
1086 }
1087 /* Finished with DMA */
1088 if (isr & ISR_DMA_I) {
1089 w83977af_dma_xmit_complete(self);
1090
1091 /* Check if there are more frames to be transmitted */
1092 /* if (irda_device_txqueue_empty(self)) { */
1093
1094 /* Prepare for receive
1095 *
1096 * ** Netwinder Tx DMA likes that we do this anyway **
1097 */
1098 w83977af_dma_receive(self);
1099 new_icr = ICR_EFSFI;
1100 /* } */
1101 }
1102
1103 /* Restore set */
1104 outb(set, iobase+SSR);
1105
1106 return new_icr;
1107}
1108
1109/*
1110 * Function w83977af_interrupt (irq, dev_id, regs)
1111 *
1112 * An interrupt from the chip has arrived. Time to do some work
1113 *
1114 */
1115static irqreturn_t w83977af_interrupt(int irq, void *dev_id,
1116 struct pt_regs *regs)
1117{
1118 struct net_device *dev = (struct net_device *) dev_id;
1119 struct w83977af_ir *self;
1120 __u8 set, icr, isr;
1121 int iobase;
1122
1123 if (!dev) {
1124 printk(KERN_WARNING "%s: irq %d for unknown device.\n",
1125 driver_name, irq);
1126 return IRQ_NONE;
1127 }
1128 self = (struct w83977af_ir *) dev->priv;
1129
1130 iobase = self->io.fir_base;
1131
1132 /* Save current bank */
1133 set = inb(iobase+SSR);
1134 switch_bank(iobase, SET0);
1135
1136 icr = inb(iobase+ICR);
1137 isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
1138
1139 outb(0, iobase+ICR); /* Disable interrupts */
1140
1141 if (isr) {
1142 /* Dispatch interrupt handler for the current speed */
1143		if (self->io.speed > PIO_MAX_SPEED)
1144 icr = w83977af_fir_interrupt(self, isr);
1145 else
1146 icr = w83977af_sir_interrupt(self, isr);
1147 }
1148
1149 outb(icr, iobase+ICR); /* Restore (new) interrupts */
1150 outb(set, iobase+SSR); /* Restore bank register */
1151 return IRQ_RETVAL(isr);
1152}
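/*
 * Note (added, not in the original source): IRQ_RETVAL(isr) evaluates to
 * IRQ_HANDLED only when at least one enabled interrupt source was
 * pending, so if a shared interrupt line fired for another device this
 * handler reports IRQ_NONE.
 */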
1153
1154/*
1155 * Function w83977af_is_receiving (self)
1156 *
1157 *    Return TRUE if we are currently receiving a frame
1158 *
1159 */
1160static int w83977af_is_receiving(struct w83977af_ir *self)
1161{
1162 int status = FALSE;
1163 int iobase;
1164 __u8 set;
1165
1166 IRDA_ASSERT(self != NULL, return FALSE;);
1167
1168 if (self->io.speed > 115200) {
1169 iobase = self->io.fir_base;
1170
1171 /* Check if rx FIFO is not empty */
1172 set = inb(iobase+SSR);
1173 switch_bank(iobase, SET2);
1174 if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
1175 /* We are receiving something */
1176 status = TRUE;
1177 }
1178 outb(set, iobase+SSR);
1179 } else
1180 status = (self->rx_buff.state != OUTSIDE_FRAME);
1181
1182 return status;
1183}
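/*
 * Note (added, not in the original source): 115200 baud is the highest
 * SIR rate, so above it the chip runs in MIR/FIR mode and reception is
 * detected from the Rx FIFO fill level in bank 2; at SIR speeds the
 * software unwrap state (rx_buff.state) is consulted instead.
 */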
1184
1185/*
1186 * Function w83977af_net_open (dev)
1187 *
1188 * Start the device
1189 *
1190 */
1191static int w83977af_net_open(struct net_device *dev)
1192{
1193 struct w83977af_ir *self;
1194 int iobase;
1195 char hwname[32];
1196 __u8 set;
1197
1198 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
1199
1200 IRDA_ASSERT(dev != NULL, return -1;);
1201 self = (struct w83977af_ir *) dev->priv;
1202
1203 IRDA_ASSERT(self != NULL, return 0;);
1204
1205 iobase = self->io.fir_base;
1206
1207 if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
1208 (void *) dev)) {
1209 return -EAGAIN;
1210 }
1211 /*
1212 * Always allocate the DMA channel after the IRQ,
1213 * and clean up on failure.
1214 */
1215 if (request_dma(self->io.dma, dev->name)) {
1216		free_irq(self->io.irq, dev);	/* dev_id must match request_irq() */
1217 return -EAGAIN;
1218 }
1219
1220 /* Save current set */
1221 set = inb(iobase+SSR);
1222
1223 /* Enable some interrupts so we can receive frames again */
1224 switch_bank(iobase, SET0);
1225 if (self->io.speed > 115200) {
1226 outb(ICR_EFSFI, iobase+ICR);
1227 w83977af_dma_receive(self);
1228 } else
1229 outb(ICR_ERBRI, iobase+ICR);
1230
1231 /* Restore bank register */
1232 outb(set, iobase+SSR);
1233
1234 /* Ready to play! */
1235 netif_start_queue(dev);
1236
1237 /* Give self a hardware name */
1238 sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);
1239
1240 /*
1241 * Open new IrLAP layer instance, now that everything should be
1242 * initialized properly
1243 */
1244 self->irlap = irlap_open(dev, &self->qos, hwname);
1245
1246 return 0;
1247}
1248
1249/*
1250 * Function w83977af_net_close (dev)
1251 *
1252 * Stop the device
1253 *
1254 */
1255static int w83977af_net_close(struct net_device *dev)
1256{
1257 struct w83977af_ir *self;
1258 int iobase;
1259 __u8 set;
1260
1261 IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
1262
1263 IRDA_ASSERT(dev != NULL, return -1;);
1264
1265 self = (struct w83977af_ir *) dev->priv;
1266
1267 IRDA_ASSERT(self != NULL, return 0;);
1268
1269 iobase = self->io.fir_base;
1270
1271 /* Stop device */
1272 netif_stop_queue(dev);
1273
1274 /* Stop and remove instance of IrLAP */
1275 if (self->irlap)
1276 irlap_close(self->irlap);
1277 self->irlap = NULL;
1278
1279 disable_dma(self->io.dma);
1280
1281 /* Save current set */
1282 set = inb(iobase+SSR);
1283
1284 /* Disable interrupts */
1285 switch_bank(iobase, SET0);
1286 outb(0, iobase+ICR);
1287
1288 free_irq(self->io.irq, dev);
1289 free_dma(self->io.dma);
1290
1291 /* Restore bank register */
1292 outb(set, iobase+SSR);
1293
1294 return 0;
1295}
1296
1297/*
1298 * Function w83977af_net_ioctl (dev, rq, cmd)
1299 *
1300 * Process IOCTL commands for this device
1301 *
1302 */
1303static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1304{
1305 struct if_irda_req *irq = (struct if_irda_req *) rq;
1306 struct w83977af_ir *self;
1307 unsigned long flags;
1308 int ret = 0;
1309
1310 IRDA_ASSERT(dev != NULL, return -1;);
1311
1312 self = dev->priv;
1313
1314 IRDA_ASSERT(self != NULL, return -1;);
1315
1316 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__ , dev->name, cmd);
1317
1318 spin_lock_irqsave(&self->lock, flags);
1319
1320 switch (cmd) {
1321 case SIOCSBANDWIDTH: /* Set bandwidth */
1322 if (!capable(CAP_NET_ADMIN)) {
1323 ret = -EPERM;
1324 goto out;
1325 }
1326 w83977af_change_speed(self, irq->ifr_baudrate);
1327 break;
1328 case SIOCSMEDIABUSY: /* Set media busy */
1329 if (!capable(CAP_NET_ADMIN)) {
1330 ret = -EPERM;
1331 goto out;
1332 }
1333 irda_device_set_media_busy(self->netdev, TRUE);
1334 break;
1335 case SIOCGRECEIVING: /* Check if we are receiving right now */
1336 irq->ifr_receiving = w83977af_is_receiving(self);
1337 break;
1338 default:
1339 ret = -EOPNOTSUPP;
1340 }
1341out:
1342 spin_unlock_irqrestore(&self->lock, flags);
1343 return ret;
1344}
1345
1346static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
1347{
1348 struct w83977af_ir *self = (struct w83977af_ir *) dev->priv;
1349
1350 return &self->stats;
1351}
1352
1353MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
1354MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
1355MODULE_LICENSE("GPL");
1356
1357
1358module_param(qos_mtt_bits, int, 0);
1359MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
1360module_param_array(io, int, NULL, 0);
1361MODULE_PARM_DESC(io, "Base I/O addresses");
1362module_param_array(irq, int, NULL, 0);
1363MODULE_PARM_DESC(irq, "IRQ lines");
1364
1365/*
1366 * Function init_module (void)
1367 *
1368 *
1369 *
1370 */
1371module_init(w83977af_init);
1372
1373/*
1374 * Function cleanup_module (void)
1375 *
1376 *
1377 *
1378 */
1379module_exit(w83977af_cleanup);
diff --git a/drivers/net/irda/w83977af_ir.h b/drivers/net/irda/w83977af_ir.h
new file mode 100644
index 000000000000..0b7661deafee
--- /dev/null
+++ b/drivers/net/irda/w83977af_ir.h
@@ -0,0 +1,199 @@
1/*********************************************************************
2 *
3 * Filename: w83977af_ir.h
4 * Version:
5 * Description:
6 * Status: Experimental.
7 * Author: Paul VanderSpek
8 * Created at: Thu Nov 19 13:55:34 1998
9 * Modified at: Tue Jan 11 13:08:19 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 *
12 * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License as
16 * published by the Free Software Foundation; either version 2 of
17 * the License, or (at your option) any later version.
18 *
19 *     Neither Dag Brattli nor University of Tromsø admit liability nor
20 * provide warranty for any of this software. This material is
21 * provided "AS-IS" and at no charge.
22 *
23 ********************************************************************/
24
25#ifndef W83977AF_IR_H
26#define W83977AF_IR_H
27
28#include <asm/io.h>
29#include <linux/types.h>
30
31/* Flags for configuration register CRF0 */
32#define ENBNKSEL 0x01
33#define APEDCRC 0x02
34#define TXW4C 0x04
35#define RXW4C 0x08
36
37/* Bank 0 */
38#define RBR 0x00 /* Receiver buffer register */
39#define TBR 0x00 /* Transmitter buffer register */
40
41#define ICR 0x01 /* Interrupt configuration register */
42#define ICR_ERBRI 0x01 /* Receiver buffer register interrupt */
43#define ICR_ETBREI      0x02 /* Transmitter buffer register empty interrupt */
44#define ICR_EUSRI	0x04 /* IR status interrupt */
45#define ICR_EHSRI 0x04
46#define ICR_ETXURI 0x04 /* Tx underrun */
47#define ICR_EDMAI 0x10 /* DMA interrupt */
48#define ICR_ETXTHI 0x20 /* Transmitter threshold interrupt */
49#define ICR_EFSFI 0x40 /* Frame status FIFO interrupt */
50#define ICR_ETMRI 0x80 /* Timer interrupt */
51
52#define UFR 0x02 /* FIFO control register */
53#define UFR_EN_FIFO 0x01 /* Enable FIFO's */
54#define UFR_RXF_RST 0x02 /* Reset Rx FIFO */
55#define UFR_TXF_RST 0x04 /* Reset Tx FIFO */
56#define UFR_RXTL 0x80 /* Rx FIFO threshold (set to 16) */
57#define UFR_TXTL 0x20 /* Tx FIFO threshold (set to 17) */
58
59#define ISR 0x02 /* Interrupt status register */
60#define ISR_RXTH_I 0x01 /* Receive threshold interrupt */
61#define ISR_TXEMP_I 0x02 /* Transmitter empty interrupt */
62#define ISR_FEND_I 0x04
63#define ISR_DMA_I 0x10
64#define ISR_TXTH_I 0x20 /* Transmitter threshold interrupt */
65#define ISR_FSF_I 0x40
66#define ISR_TMR_I 0x80 /* Timer interrupt */
67
68#define UCR		0x03 /* UART control register */
69#define UCR_DLS8 0x03 /* 8N1 */
70
71#define SSR 0x03 /* Sets select register */
72#define SET0 UCR_DLS8 /* Make sure we keep 8N1 */
73#define SET1 (0x80|UCR_DLS8) /* Make sure we keep 8N1 */
74#define SET2 0xE0
75#define SET3 0xE4
76#define SET4 0xE8
77#define SET5 0xEC
78#define SET6 0xF0
79#define SET7 0xF4
80
81#define HCR 0x04
82#define HCR_MODE_MASK ~(0xD0)
83#define HCR_SIR 0x60
84#define HCR_MIR_576 0x20
85#define HCR_MIR_1152 0x80
86#define HCR_FIR 0xA0
87#define HCR_EN_DMA 0x04
88#define HCR_EN_IRQ 0x08
89#define HCR_TX_WT 0x08
90
91#define USR 0x05 /* IR status register */
92#define USR_RDR 0x01 /* Receive data ready */
93#define USR_TSRE 0x40 /* Transmitter empty? */
94
95#define AUDR 0x07
96#define AUDR_SFEND 0x08 /* Set a frame end */
97#define AUDR_RXBSY 0x20 /* Rx busy */
98#define AUDR_UNDR	0x40 /* Transmitter underrun */
99
100/* Set 2 */
101#define ABLL 0x00 /* Advanced baud rate divisor latch (low byte) */
102#define ABHL 0x01 /* Advanced baud rate divisor latch (high byte) */
103
104#define ADCR1 0x02
105#define ADCR1_ADV_SL 0x01
106#define ADCR1_D_CHSW	0x08 /* the specs are wrong; it's bit 3, not 4 */
107#define ADCR1_DMA_F 0x02
108
109#define ADCR2 0x04
110#define ADCR2_TXFS32 0x01
111#define ADCR2_RXFS32 0x04
112
113#define RXFDTH 0x07
114
115/* Set 3 */
116#define AUID 0x00
117
118/* Set 4 */
119#define TMRL 0x00 /* Timer value register (low byte) */
120#define TMRH 0x01 /* Timer value register (high byte) */
121
122#define IR_MSL 0x02 /* Infrared mode select */
123#define IR_MSL_EN_TMR 0x01 /* Enable timer */
124
125#define TFRLL 0x04 /* Transmitter frame length (low byte) */
126#define TFRLH 0x05 /* Transmitter frame length (high byte) */
127#define RFRLL 0x06 /* Receiver frame length (low byte) */
128#define RFRLH 0x07 /* Receiver frame length (high byte) */
129
130/* Set 5 */
131
132#define FS_FO 0x05 /* Frame status FIFO */
133#define FS_FO_FSFDR 0x80 /* Frame status FIFO data ready */
134#define FS_FO_LST_FR 0x40 /* Frame lost */
135#define FS_FO_MX_LEX 0x10 /* Max frame len exceeded */
136#define FS_FO_PHY_ERR 0x08 /* Physical layer error */
137#define FS_FO_CRC_ERR 0x04
138#define FS_FO_RX_OV 0x02 /* Receive overrun */
139#define FS_FO_FSF_OV 0x01 /* Frame status FIFO overrun */
140#define FS_FO_ERR_MSK 0x5f /* Error mask */
141
142#define RFLFL 0x06
143#define RFLFH 0x07
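/* Added note: FS_FO_ERR_MSK above is simply the OR of the individual
 * error bits: FS_FO_LST_FR|FS_FO_MX_LEX|FS_FO_PHY_ERR|FS_FO_CRC_ERR|
 * FS_FO_RX_OV|FS_FO_FSF_OV = 0x40|0x10|0x08|0x04|0x02|0x01 = 0x5f.
 */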
144
145/* Set 6 */
146#define IR_CFG2 0x00
147#define IR_CFG2_DIS_CRC 0x02
148
149/* Set 7 */
150#define IRM_CR 0x07 /* Infrared module control register */
151#define IRM_CR_IRX_MSL 0x40
152#define IRM_CR_AF_MNT 0x80 /* Automatic format */
153
154/* For storing entries in the status FIFO */
155struct st_fifo_entry {
156 int status;
157 int len;
158};
159
160struct st_fifo {
161 struct st_fifo_entry entries[10];
162 int head;
163 int tail;
164 int len;
165};
166
167/* Private data for each instance */
168struct w83977af_ir {
169 struct st_fifo st_fifo;
170
171 int tx_buff_offsets[10]; /* Offsets between frames in tx_buff */
172 int tx_len; /* Number of frames in tx_buff */
173
174 struct net_device *netdev; /* Yes! we are some kind of netdevice */
175 struct net_device_stats stats;
176
177	struct irlap_cb    *irlap; /* The link layer we are bound to */
178 struct qos_info qos; /* QoS capabilities for this device */
179
180 chipio_t io; /* IrDA controller information */
181 iobuff_t tx_buff; /* Transmit buffer */
182 iobuff_t rx_buff; /* Receive buffer */
183 dma_addr_t tx_buff_dma;
184 dma_addr_t rx_buff_dma;
185
186 /* Note : currently locking is *very* incomplete, but this
187 * will get you started. Check in nsc-ircc.c for a proper
188 * locking strategy. - Jean II */
189 spinlock_t lock; /* For serializing operations */
190
191 __u32 new_speed;
192};
193
194static inline void switch_bank( int iobase, int set)
195{
196 outb(set, iobase+SSR);
197}
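/*
 * Illustrative usage sketch (added, not part of the original header):
 * code in w83977af_ir.c always brackets banked register accesses with a
 * save / switch_bank / restore sequence on SSR.  A hypothetical helper
 * (names below are not from the driver) would look like:
 */
#if 0
static inline void example_banked_outb(int iobase, int bank, int reg, __u8 val)
{
	__u8 set = inb(iobase + SSR);	/* save the currently selected bank */

	switch_bank(iobase, bank);	/* select the bank that holds 'reg' */
	outb(val, iobase + reg);
	outb(set, iobase + SSR);	/* restore the previous bank */
}
#endif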
198
199#endif