author	Krzysztof Hałasa <khc@pm.waw.pl>	2008-07-02 11:47:52 -0400
committer	Krzysztof Hałasa <khc@pm.waw.pl>	2008-07-23 17:05:55 -0400
commit	52e8a6a2d8dc19002d1757870d16051157ce999c (patch)
tree	32e65d1160f279861c3667a031fdd20df456ffdd /drivers/net
parent	aca257530f7d681b953961090ad729c32aa5ad62 (diff)
WAN: Convert Zilog-based drivers to generic HDLC
Signed-off-by: Krzysztof Hałasa <khc@pm.waw.pl>
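
Each port now registers through the generic HDLC layer instead of syncppp: the netdevice is allocated with alloc_hdlcdev(), the attach and xmit hooks are filled in via dev_to_hdlc(), open/stop/ioctl defer to hdlc_open()/hdlc_close()/hdlc_ioctl(), and the device is registered with register_hdlc_device(). A minimal sketch of that pattern (the my_* names are placeholders, not the driver's own; the real hooks are in hostess_sv11.c and sealevel.c in the diff below):

	#include <linux/hdlc.h>

	/* Accept only what the Z8530 hardware can do: NRZ, 16-bit CCITT CRC */
	static int my_attach(struct net_device *dev, unsigned short encoding,
			     unsigned short parity)
	{
		if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
			return 0;
		return -EINVAL;
	}

	static struct net_device *my_register_port(void *port_priv,
						   int iobase, int irq)
	{
		struct net_device *dev = alloc_hdlcdev(port_priv);

		if (!dev)
			return NULL;
		dev_to_hdlc(dev)->attach = my_attach;
		dev_to_hdlc(dev)->xmit = my_queue_xmit;	/* hands skbs to z8530_queue_xmit() */
		dev->open = my_open;		/* opens the Z8530 channel, then hdlc_open() */
		dev->stop = my_close;		/* hdlc_close(), then closes the channel */
		dev->do_ioctl = my_ioctl;	/* falls through to hdlc_ioctl() */
		dev->base_addr = iobase;
		dev->irq = irq;
		if (register_hdlc_device(dev)) {
			free_netdev(dev);
			return NULL;
		}
		return dev;
	}

Received frames are typed with hdlc_type_trans() rather than the fixed ETH_P_WAN_PPP, and DCD changes are reported via netif_carrier_on()/netif_carrier_off() instead of sppp_reopen().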
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/wan/Kconfig         |   4
-rw-r--r--  drivers/net/wan/Makefile        |   6
-rw-r--r--  drivers/net/wan/hostess_sv11.c  | 382
-rw-r--r--  drivers/net/wan/sealevel.c      | 361
-rw-r--r--  drivers/net/wan/z85230.c        | 193
-rw-r--r--  drivers/net/wan/z85230.h        |  10
6 files changed, 382 insertions(+), 574 deletions(-)
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index e08cd4bf7f51..04c714aa7a6a 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -25,7 +25,7 @@ if WAN
25# There is no way to detect a comtrol sv11 - force it modular for now. 25# There is no way to detect a comtrol sv11 - force it modular for now.
26config HOSTESS_SV11 26config HOSTESS_SV11
27 tristate "Comtrol Hostess SV-11 support" 27 tristate "Comtrol Hostess SV-11 support"
28 depends on ISA && m && ISA_DMA_API && INET 28 depends on ISA && m && ISA_DMA_API && INET && HDLC
29 help 29 help
30 Driver for Comtrol Hostess SV-11 network card which 30 Driver for Comtrol Hostess SV-11 network card which
31 operates on low speed synchronous serial links at up to 31 operates on low speed synchronous serial links at up to
@@ -88,7 +88,7 @@ config LANMEDIA
88# There is no way to detect a Sealevel board. Force it modular 88# There is no way to detect a Sealevel board. Force it modular
89config SEALEVEL_4021 89config SEALEVEL_4021
90 tristate "Sealevel Systems 4021 support" 90 tristate "Sealevel Systems 4021 support"
91 depends on ISA && m && ISA_DMA_API && INET 91 depends on ISA && m && ISA_DMA_API && INET && HDLC
92 help 92 help
93 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. 93 This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
94 94
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 9d085e0f0f44..5d27a17792cb 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -21,11 +21,11 @@ pc300-y := pc300_drv.o
21pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o 21pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o
22pc300-objs := $(pc300-y) 22pc300-objs := $(pc300-y)
23 23
24obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o 24obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
25obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o 25obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
26obj-$(CONFIG_COSA) += cosa.o 26obj-$(CONFIG_COSA) += cosa.o
27obj-$(CONFIG_FARSYNC) += farsync.o 27obj-$(CONFIG_FARSYNC) += farsync.o
28obj-$(CONFIG_DSCC4) += dscc4.o 28obj-$(CONFIG_DSCC4) += dscc4.o
29obj-$(CONFIG_LANMEDIA) += syncppp.o 29obj-$(CONFIG_LANMEDIA) += syncppp.o
30obj-$(CONFIG_X25_ASY) += x25_asy.o 30obj-$(CONFIG_X25_ASY) += x25_asy.o
31 31
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index f3065d3473fd..e299313f828a 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -16,6 +16,8 @@
16 * touching control registers. 16 * touching control registers.
17 * 17 *
18 * Port B isnt wired (why - beats me) 18 * Port B isnt wired (why - beats me)
19 *
20 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
19 */ 21 */
20 22
21#include <linux/module.h> 23#include <linux/module.h>
@@ -26,6 +28,7 @@
26#include <linux/netdevice.h> 28#include <linux/netdevice.h>
27#include <linux/if_arp.h> 29#include <linux/if_arp.h>
28#include <linux/delay.h> 30#include <linux/delay.h>
31#include <linux/hdlc.h>
29#include <linux/ioport.h> 32#include <linux/ioport.h>
30#include <net/arp.h> 33#include <net/arp.h>
31 34
@@ -33,34 +36,31 @@
33#include <asm/io.h> 36#include <asm/io.h>
34#include <asm/dma.h> 37#include <asm/dma.h>
35#include <asm/byteorder.h> 38#include <asm/byteorder.h>
36#include <net/syncppp.h>
37#include "z85230.h" 39#include "z85230.h"
38 40
39static int dma; 41static int dma;
40 42
41struct sv11_device
42{
43 void *if_ptr; /* General purpose pointer (used by SPPP) */
44 struct z8530_dev sync;
45 struct ppp_device netdev;
46};
47
48/* 43/*
49 * Network driver support routines 44 * Network driver support routines
50 */ 45 */
51 46
47static inline struct z8530_dev* dev_to_sv(struct net_device *dev)
48{
49 return (struct z8530_dev *)dev_to_hdlc(dev)->priv;
50}
51
52/* 52/*
53 * Frame receive. Simple for our card as we do sync ppp and there 53 * Frame receive. Simple for our card as we do HDLC and there
54 * is no funny garbage involved 54 * is no funny garbage involved
55 */ 55 */
56 56
57static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) 57static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
58{ 58{
59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 59 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
60 skb_trim(skb, skb->len-2); 60 skb_trim(skb, skb->len - 2);
61 skb->protocol=__constant_htons(ETH_P_WAN_PPP); 61 skb->protocol = hdlc_type_trans(skb, c->netdevice);
62 skb_reset_mac_header(skb); 62 skb_reset_mac_header(skb);
63 skb->dev=c->netdevice; 63 skb->dev = c->netdevice;
64 /* 64 /*
65 * Send it to the PPP layer. We don't have time to process 65 * Send it to the PPP layer. We don't have time to process
66 * it right now. 66 * it right now.
@@ -68,56 +68,51 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
68 netif_rx(skb); 68 netif_rx(skb);
69 c->netdevice->last_rx = jiffies; 69 c->netdevice->last_rx = jiffies;
70} 70}
71 71
72/* 72/*
73 * We've been placed in the UP state 73 * We've been placed in the UP state
74 */ 74 */
75 75
76static int hostess_open(struct net_device *d) 76static int hostess_open(struct net_device *d)
77{ 77{
78 struct sv11_device *sv11=d->ml_priv; 78 struct z8530_dev *sv11 = dev_to_sv(d);
79 int err = -1; 79 int err = -1;
80 80
81 /* 81 /*
82 * Link layer up 82 * Link layer up
83 */ 83 */
84 switch(dma) 84 switch (dma) {
85 {
86 case 0: 85 case 0:
87 err=z8530_sync_open(d, &sv11->sync.chanA); 86 err = z8530_sync_open(d, &sv11->chanA);
88 break; 87 break;
89 case 1: 88 case 1:
90 err=z8530_sync_dma_open(d, &sv11->sync.chanA); 89 err = z8530_sync_dma_open(d, &sv11->chanA);
91 break; 90 break;
92 case 2: 91 case 2:
93 err=z8530_sync_txdma_open(d, &sv11->sync.chanA); 92 err = z8530_sync_txdma_open(d, &sv11->chanA);
94 break; 93 break;
95 } 94 }
96 95
97 if(err) 96 if (err)
98 return err; 97 return err;
99 /* 98
100 * Begin PPP 99 err = hdlc_open(d);
101 */ 100 if (err) {
102 err=sppp_open(d); 101 switch (dma) {
103 if(err)
104 {
105 switch(dma)
106 {
107 case 0: 102 case 0:
108 z8530_sync_close(d, &sv11->sync.chanA); 103 z8530_sync_close(d, &sv11->chanA);
109 break; 104 break;
110 case 1: 105 case 1:
111 z8530_sync_dma_close(d, &sv11->sync.chanA); 106 z8530_sync_dma_close(d, &sv11->chanA);
112 break; 107 break;
113 case 2: 108 case 2:
114 z8530_sync_txdma_close(d, &sv11->sync.chanA); 109 z8530_sync_txdma_close(d, &sv11->chanA);
115 break; 110 break;
116 } 111 }
117 return err; 112 return err;
118 } 113 }
119 sv11->sync.chanA.rx_function=hostess_input; 114 sv11->chanA.rx_function = hostess_input;
120 115
121 /* 116 /*
122 * Go go go 117 * Go go go
123 */ 118 */
@@ -128,30 +123,24 @@ static int hostess_open(struct net_device *d)
128 123
129static int hostess_close(struct net_device *d) 124static int hostess_close(struct net_device *d)
130{ 125{
131 struct sv11_device *sv11=d->ml_priv; 126 struct z8530_dev *sv11 = dev_to_sv(d);
132 /* 127 /*
133 * Discard new frames 128 * Discard new frames
134 */ 129 */
135 sv11->sync.chanA.rx_function=z8530_null_rx; 130 sv11->chanA.rx_function = z8530_null_rx;
136 /* 131
137 * PPP off 132 hdlc_close(d);
138 */
139 sppp_close(d);
140 /*
141 * Link layer down
142 */
143 netif_stop_queue(d); 133 netif_stop_queue(d);
144 134
145 switch(dma) 135 switch (dma) {
146 {
147 case 0: 136 case 0:
148 z8530_sync_close(d, &sv11->sync.chanA); 137 z8530_sync_close(d, &sv11->chanA);
149 break; 138 break;
150 case 1: 139 case 1:
151 z8530_sync_dma_close(d, &sv11->sync.chanA); 140 z8530_sync_dma_close(d, &sv11->chanA);
152 break; 141 break;
153 case 2: 142 case 2:
154 z8530_sync_txdma_close(d, &sv11->sync.chanA); 143 z8530_sync_txdma_close(d, &sv11->chanA);
155 break; 144 break;
156 } 145 }
157 return 0; 146 return 0;
@@ -159,232 +148,174 @@ static int hostess_close(struct net_device *d)
159 148
160static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 149static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
161{ 150{
162 /* struct sv11_device *sv11=d->ml_priv; 151 /* struct z8530_dev *sv11=dev_to_sv(d);
163 z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ 152 z8530_ioctl(d,&sv11->chanA,ifr,cmd) */
164 return sppp_do_ioctl(d, ifr,cmd); 153 return hdlc_ioctl(d, ifr, cmd);
165}
166
167static struct net_device_stats *hostess_get_stats(struct net_device *d)
168{
169 struct sv11_device *sv11=d->ml_priv;
170 if(sv11)
171 return z8530_get_stats(&sv11->sync.chanA);
172 else
173 return NULL;
174} 154}
175 155
176/* 156/*
177 * Passed PPP frames, fire them downwind. 157 * Passed network frames, fire them downwind.
178 */ 158 */
179 159
180static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) 160static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d)
181{ 161{
182 struct sv11_device *sv11=d->ml_priv; 162 return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb);
183 return z8530_queue_xmit(&sv11->sync.chanA, skb);
184} 163}
185 164
186static int hostess_neigh_setup(struct neighbour *n) 165static int hostess_attach(struct net_device *dev, unsigned short encoding,
166 unsigned short parity)
187{ 167{
188 if (n->nud_state == NUD_NONE) { 168 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
189 n->ops = &arp_broken_ops; 169 return 0;
190 n->output = n->ops->output; 170 return -EINVAL;
191 }
192 return 0;
193}
194
195static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
196{
197 if (p->tbl->family == AF_INET) {
198 p->neigh_setup = hostess_neigh_setup;
199 p->ucast_probes = 0;
200 p->mcast_probes = 0;
201 }
202 return 0;
203}
204
205static void sv11_setup(struct net_device *dev)
206{
207 dev->open = hostess_open;
208 dev->stop = hostess_close;
209 dev->hard_start_xmit = hostess_queue_xmit;
210 dev->get_stats = hostess_get_stats;
211 dev->do_ioctl = hostess_ioctl;
212 dev->neigh_setup = hostess_neigh_setup_dev;
213} 171}
214 172
215/* 173/*
216 * Description block for a Comtrol Hostess SV11 card 174 * Description block for a Comtrol Hostess SV11 card
217 */ 175 */
218 176
219static struct sv11_device *sv11_init(int iobase, int irq) 177static struct z8530_dev *sv11_init(int iobase, int irq)
220{ 178{
221 struct z8530_dev *dev; 179 struct z8530_dev *sv;
222 struct sv11_device *sv; 180 struct net_device *netdev;
223
224 /* 181 /*
225 * Get the needed I/O space 182 * Get the needed I/O space
226 */ 183 */
227 184
228 if(!request_region(iobase, 8, "Comtrol SV11")) 185 if (!request_region(iobase, 8, "Comtrol SV11")) {
229 { 186 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n",
230 printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase); 187 iobase);
231 return NULL; 188 return NULL;
232 } 189 }
233 190
234 sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); 191 sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL);
235 if(!sv) 192 if (!sv)
236 goto fail3; 193 goto err_kzalloc;
237 194
238 sv->if_ptr=&sv->netdev;
239
240 sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup);
241 if(!sv->netdev.dev)
242 goto fail2;
243
244 dev=&sv->sync;
245
246 /* 195 /*
247 * Stuff in the I/O addressing 196 * Stuff in the I/O addressing
248 */ 197 */
249 198
250 dev->active = 0; 199 sv->active = 0;
251 200
252 dev->chanA.ctrlio=iobase+1; 201 sv->chanA.ctrlio = iobase + 1;
253 dev->chanA.dataio=iobase+3; 202 sv->chanA.dataio = iobase + 3;
254 dev->chanB.ctrlio=-1; 203 sv->chanB.ctrlio = -1;
255 dev->chanB.dataio=-1; 204 sv->chanB.dataio = -1;
256 dev->chanA.irqs=&z8530_nop; 205 sv->chanA.irqs = &z8530_nop;
257 dev->chanB.irqs=&z8530_nop; 206 sv->chanB.irqs = &z8530_nop;
258 207
259 outb(0, iobase+4); /* DMA off */ 208 outb(0, iobase + 4); /* DMA off */
260 209
261 /* We want a fast IRQ for this device. Actually we'd like an even faster 210 /* We want a fast IRQ for this device. Actually we'd like an even faster
262 IRQ ;) - This is one driver RtLinux is made for */ 211 IRQ ;) - This is one driver RtLinux is made for */
263 212
264 if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "Hostess SV11", dev)<0) 213 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
265 { 214 "Hostess SV11", sv) < 0) {
266 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); 215 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
267 goto fail1; 216 goto err_irq;
268 } 217 }
269 218
270 dev->irq=irq; 219 sv->irq = irq;
271 dev->chanA.private=sv; 220 sv->chanA.private = sv;
272 dev->chanA.netdevice=sv->netdev.dev; 221 sv->chanA.dev = sv;
273 dev->chanA.dev=dev; 222 sv->chanB.dev = sv;
274 dev->chanB.dev=dev; 223
275 224 if (dma) {
276 if(dma)
277 {
278 /* 225 /*
279 * You can have DMA off or 1 and 3 thats the lot 226 * You can have DMA off or 1 and 3 thats the lot
280 * on the Comtrol. 227 * on the Comtrol.
281 */ 228 */
282 dev->chanA.txdma=3; 229 sv->chanA.txdma = 3;
283 dev->chanA.rxdma=1; 230 sv->chanA.rxdma = 1;
284 outb(0x03|0x08, iobase+4); /* DMA on */ 231 outb(0x03 | 0x08, iobase + 4); /* DMA on */
285 if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0) 232 if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)"))
286 goto fail; 233 goto err_txdma;
287 234
288 if(dma==1) 235 if (dma == 1)
289 { 236 if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)"))
290 if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0) 237 goto err_rxdma;
291 goto dmafail;
292 }
293 } 238 }
294 239
295 /* Kill our private IRQ line the hostess can end up chattering 240 /* Kill our private IRQ line the hostess can end up chattering
296 until the configuration is set */ 241 until the configuration is set */
297 disable_irq(irq); 242 disable_irq(irq);
298 243
299 /* 244 /*
300 * Begin normal initialise 245 * Begin normal initialise
301 */ 246 */
302 247
303 if(z8530_init(dev)!=0) 248 if (z8530_init(sv)) {
304 {
305 printk(KERN_ERR "Z8530 series device not found.\n"); 249 printk(KERN_ERR "Z8530 series device not found.\n");
306 enable_irq(irq); 250 enable_irq(irq);
307 goto dmafail2; 251 goto free_dma;
308 } 252 }
309 z8530_channel_load(&dev->chanB, z8530_dead_port); 253 z8530_channel_load(&sv->chanB, z8530_dead_port);
310 if(dev->type==Z85C30) 254 if (sv->type == Z85C30)
311 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 255 z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream);
312 else 256 else
313 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 257 z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230);
314 258
315 enable_irq(irq); 259 enable_irq(irq);
316
317 260
318 /* 261 /*
319 * Now we can take the IRQ 262 * Now we can take the IRQ
320 */ 263 */
321 if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0)
322 {
323 struct net_device *d=dev->chanA.netdevice;
324 264
325 /* 265 sv->chanA.netdevice = netdev = alloc_hdlcdev(sv);
326 * Initialise the PPP components 266 if (!netdev)
327 */ 267 goto free_dma;
328 d->ml_priv = sv;
329 sppp_attach(&sv->netdev);
330
331 /*
332 * Local fields
333 */
334
335 d->base_addr = iobase;
336 d->irq = irq;
337
338 if(register_netdev(d))
339 {
340 printk(KERN_ERR "%s: unable to register device.\n",
341 d->name);
342 sppp_detach(d);
343 goto dmafail2;
344 }
345 268
346 z8530_describe(dev, "I/O", iobase); 269 dev_to_hdlc(netdev)->attach = hostess_attach;
347 dev->active=1; 270 dev_to_hdlc(netdev)->xmit = hostess_queue_xmit;
348 return sv; 271 netdev->open = hostess_open;
272 netdev->stop = hostess_close;
273 netdev->do_ioctl = hostess_ioctl;
274 netdev->base_addr = iobase;
275 netdev->irq = irq;
276
277 if (register_hdlc_device(netdev)) {
278 printk(KERN_ERR "hostess: unable to register HDLC device.\n");
279 free_netdev(netdev);
280 goto free_dma;
349 } 281 }
350dmafail2: 282
351 if(dma==1) 283 z8530_describe(sv, "I/O", iobase);
352 free_dma(dev->chanA.rxdma); 284 sv->active = 1;
353dmafail: 285 return sv;
354 if(dma) 286
355 free_dma(dev->chanA.txdma); 287free_dma:
356fail: 288 if (dma == 1)
357 free_irq(irq, dev); 289 free_dma(sv->chanA.rxdma);
358fail1: 290err_rxdma:
359 free_netdev(sv->netdev.dev); 291 if (dma)
360fail2: 292 free_dma(sv->chanA.txdma);
293err_txdma:
294 free_irq(irq, sv);
295err_irq:
361 kfree(sv); 296 kfree(sv);
362fail3: 297err_kzalloc:
363 release_region(iobase,8); 298 release_region(iobase, 8);
364 return NULL; 299 return NULL;
365} 300}
366 301
367static void sv11_shutdown(struct sv11_device *dev) 302static void sv11_shutdown(struct z8530_dev *dev)
368{ 303{
369 sppp_detach(dev->netdev.dev); 304 unregister_hdlc_device(dev->chanA.netdevice);
370 unregister_netdev(dev->netdev.dev); 305 z8530_shutdown(dev);
371 z8530_shutdown(&dev->sync); 306 free_irq(dev->irq, dev);
372 free_irq(dev->sync.irq, dev); 307 if (dma) {
373 if(dma) 308 if (dma == 1)
374 { 309 free_dma(dev->chanA.rxdma);
375 if(dma==1) 310 free_dma(dev->chanA.txdma);
376 free_dma(dev->sync.chanA.rxdma);
377 free_dma(dev->sync.chanA.txdma);
378 } 311 }
379 release_region(dev->sync.chanA.ctrlio-1, 8); 312 release_region(dev->chanA.ctrlio - 1, 8);
380 free_netdev(dev->netdev.dev); 313 free_netdev(dev->chanA.netdevice);
381 kfree(dev); 314 kfree(dev);
382} 315}
383 316
384#ifdef MODULE 317static int io = 0x200;
385 318static int irq = 9;
386static int io=0x200;
387static int irq=9;
388 319
389module_param(io, int, 0); 320module_param(io, int, 0);
390MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); 321MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card");
@@ -397,22 +328,17 @@ MODULE_AUTHOR("Alan Cox");
397MODULE_LICENSE("GPL"); 328MODULE_LICENSE("GPL");
398MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); 329MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11");
399 330
400static struct sv11_device *sv11_unit; 331static struct z8530_dev *sv11_unit;
401 332
402int init_module(void) 333int init_module(void)
403{ 334{
404 printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n"); 335 if ((sv11_unit = sv11_init(io, irq)) == NULL)
405 printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n");
406 if((sv11_unit=sv11_init(io,irq))==NULL)
407 return -ENODEV; 336 return -ENODEV;
408 return 0; 337 return 0;
409} 338}
410 339
411void cleanup_module(void) 340void cleanup_module(void)
412{ 341{
413 if(sv11_unit) 342 if (sv11_unit)
414 sv11_shutdown(sv11_unit); 343 sv11_shutdown(sv11_unit);
415} 344}
416
417#endif
418
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 44a89df1b8bf..c0235844a4d5 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -8,6 +8,7 @@
8 * 8 *
9 * (c) Copyright 1999, 2001 Alan Cox 9 * (c) Copyright 1999, 2001 Alan Cox
10 * (c) Copyright 2001 Red Hat Inc. 10 * (c) Copyright 2001 Red Hat Inc.
11 * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
11 * 12 *
12 */ 13 */
13 14
@@ -19,6 +20,7 @@
19#include <linux/netdevice.h> 20#include <linux/netdevice.h>
20#include <linux/if_arp.h> 21#include <linux/if_arp.h>
21#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/hdlc.h>
22#include <linux/ioport.h> 24#include <linux/ioport.h>
23#include <linux/init.h> 25#include <linux/init.h>
24#include <net/arp.h> 26#include <net/arp.h>
@@ -27,22 +29,19 @@
27#include <asm/io.h> 29#include <asm/io.h>
28#include <asm/dma.h> 30#include <asm/dma.h>
29#include <asm/byteorder.h> 31#include <asm/byteorder.h>
30#include <net/syncppp.h>
31#include "z85230.h" 32#include "z85230.h"
32 33
33 34
34struct slvl_device 35struct slvl_device
35{ 36{
36 void *if_ptr; /* General purpose pointer (used by SPPP) */
37 struct z8530_channel *chan; 37 struct z8530_channel *chan;
38 struct ppp_device pppdev;
39 int channel; 38 int channel;
40}; 39};
41 40
42 41
43struct slvl_board 42struct slvl_board
44{ 43{
45 struct slvl_device *dev[2]; 44 struct slvl_device dev[2];
46 struct z8530_dev board; 45 struct z8530_dev board;
47 int iobase; 46 int iobase;
48}; 47};
@@ -51,72 +50,69 @@ struct slvl_board
51 * Network driver support routines 50 * Network driver support routines
52 */ 51 */
53 52
53static inline struct slvl_device* dev_to_chan(struct net_device *dev)
54{
55 return (struct slvl_device *)dev_to_hdlc(dev)->priv;
56}
57
54/* 58/*
55 * Frame receive. Simple for our card as we do sync ppp and there 59 * Frame receive. Simple for our card as we do HDLC and there
56 * is no funny garbage involved 60 * is no funny garbage involved
57 */ 61 */
58 62
59static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) 63static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
60{ 64{
61 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ 65 /* Drop the CRC - it's not a good idea to try and negotiate it ;) */
62 skb_trim(skb, skb->len-2); 66 skb_trim(skb, skb->len - 2);
63 skb->protocol=htons(ETH_P_WAN_PPP); 67 skb->protocol = hdlc_type_trans(skb, c->netdevice);
64 skb_reset_mac_header(skb); 68 skb_reset_mac_header(skb);
65 skb->dev=c->netdevice; 69 skb->dev = c->netdevice;
66 /*
67 * Send it to the PPP layer. We don't have time to process
68 * it right now.
69 */
70 netif_rx(skb); 70 netif_rx(skb);
71 c->netdevice->last_rx = jiffies; 71 c->netdevice->last_rx = jiffies;
72} 72}
73 73
74/* 74/*
75 * We've been placed in the UP state 75 * We've been placed in the UP state
76 */ 76 */
77 77
78static int sealevel_open(struct net_device *d) 78static int sealevel_open(struct net_device *d)
79{ 79{
80 struct slvl_device *slvl=d->priv; 80 struct slvl_device *slvl = dev_to_chan(d);
81 int err = -1; 81 int err = -1;
82 int unit = slvl->channel; 82 int unit = slvl->channel;
83 83
84 /* 84 /*
85 * Link layer up. 85 * Link layer up.
86 */ 86 */
87 87
88 switch(unit) 88 switch (unit)
89 { 89 {
90 case 0: 90 case 0:
91 err=z8530_sync_dma_open(d, slvl->chan); 91 err = z8530_sync_dma_open(d, slvl->chan);
92 break; 92 break;
93 case 1: 93 case 1:
94 err=z8530_sync_open(d, slvl->chan); 94 err = z8530_sync_open(d, slvl->chan);
95 break; 95 break;
96 } 96 }
97 97
98 if(err) 98 if (err)
99 return err; 99 return err;
100 /* 100
101 * Begin PPP 101 err = hdlc_open(d);
102 */ 102 if (err) {
103 err=sppp_open(d); 103 switch (unit) {
104 if(err)
105 {
106 switch(unit)
107 {
108 case 0: 104 case 0:
109 z8530_sync_dma_close(d, slvl->chan); 105 z8530_sync_dma_close(d, slvl->chan);
110 break; 106 break;
111 case 1: 107 case 1:
112 z8530_sync_close(d, slvl->chan); 108 z8530_sync_close(d, slvl->chan);
113 break; 109 break;
114 } 110 }
115 return err; 111 return err;
116 } 112 }
117 113
118 slvl->chan->rx_function=sealevel_input; 114 slvl->chan->rx_function = sealevel_input;
119 115
120 /* 116 /*
121 * Go go go 117 * Go go go
122 */ 118 */
@@ -126,26 +122,19 @@ static int sealevel_open(struct net_device *d)
126 122
127static int sealevel_close(struct net_device *d) 123static int sealevel_close(struct net_device *d)
128{ 124{
129 struct slvl_device *slvl=d->priv; 125 struct slvl_device *slvl = dev_to_chan(d);
130 int unit = slvl->channel; 126 int unit = slvl->channel;
131 127
132 /* 128 /*
133 * Discard new frames 129 * Discard new frames
134 */ 130 */
135
136 slvl->chan->rx_function=z8530_null_rx;
137
138 /*
139 * PPP off
140 */
141 sppp_close(d);
142 /*
143 * Link layer down
144 */
145 131
132 slvl->chan->rx_function = z8530_null_rx;
133
134 hdlc_close(d);
146 netif_stop_queue(d); 135 netif_stop_queue(d);
147 136
148 switch(unit) 137 switch (unit)
149 { 138 {
150 case 0: 139 case 0:
151 z8530_sync_dma_close(d, slvl->chan); 140 z8530_sync_dma_close(d, slvl->chan);
@@ -159,210 +148,153 @@ static int sealevel_close(struct net_device *d)
159 148
160static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) 149static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
161{ 150{
162 /* struct slvl_device *slvl=d->priv; 151 /* struct slvl_device *slvl=dev_to_chan(d);
163 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ 152 z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
164 return sppp_do_ioctl(d, ifr,cmd); 153 return hdlc_ioctl(d, ifr, cmd);
165}
166
167static struct net_device_stats *sealevel_get_stats(struct net_device *d)
168{
169 struct slvl_device *slvl=d->priv;
170 if(slvl)
171 return z8530_get_stats(slvl->chan);
172 else
173 return NULL;
174} 154}
175 155
176/* 156/*
177 * Passed PPP frames, fire them downwind. 157 * Passed network frames, fire them downwind.
178 */ 158 */
179 159
180static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) 160static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d)
181{ 161{
182 struct slvl_device *slvl=d->priv; 162 return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
183 return z8530_queue_xmit(slvl->chan, skb);
184} 163}
185 164
186static int sealevel_neigh_setup(struct neighbour *n) 165static int sealevel_attach(struct net_device *dev, unsigned short encoding,
166 unsigned short parity)
187{ 167{
188 if (n->nud_state == NUD_NONE) { 168 if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
189 n->ops = &arp_broken_ops; 169 return 0;
190 n->output = n->ops->output; 170 return -EINVAL;
191 }
192 return 0;
193} 171}
194 172
195static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) 173static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
196{ 174{
197 if (p->tbl->family == AF_INET) { 175 struct net_device *dev = alloc_hdlcdev(sv);
198 p->neigh_setup = sealevel_neigh_setup; 176 if (!dev)
199 p->ucast_probes = 0; 177 return -1;
200 p->mcast_probes = 0; 178
179 dev_to_hdlc(dev)->attach = sealevel_attach;
180 dev_to_hdlc(dev)->xmit = sealevel_queue_xmit;
181 dev->open = sealevel_open;
182 dev->stop = sealevel_close;
183 dev->do_ioctl = sealevel_ioctl;
184 dev->base_addr = iobase;
185 dev->irq = irq;
186
187 if (register_hdlc_device(dev)) {
188 printk(KERN_ERR "sealevel: unable to register HDLC device\n");
189 free_netdev(dev);
190 return -1;
201 } 191 }
202 return 0;
203}
204 192
205static int sealevel_attach(struct net_device *dev) 193 sv->chan->netdevice = dev;
206{
207 struct slvl_device *sv = dev->priv;
208 sppp_attach(&sv->pppdev);
209 return 0; 194 return 0;
210} 195}
211 196
212static void sealevel_detach(struct net_device *dev)
213{
214 sppp_detach(dev);
215}
216
217static void slvl_setup(struct net_device *d)
218{
219 d->open = sealevel_open;
220 d->stop = sealevel_close;
221 d->init = sealevel_attach;
222 d->uninit = sealevel_detach;
223 d->hard_start_xmit = sealevel_queue_xmit;
224 d->get_stats = sealevel_get_stats;
225 d->set_multicast_list = NULL;
226 d->do_ioctl = sealevel_ioctl;
227 d->neigh_setup = sealevel_neigh_setup_dev;
228 d->set_mac_address = NULL;
229
230}
231
232static inline struct slvl_device *slvl_alloc(int iobase, int irq)
233{
234 struct net_device *d;
235 struct slvl_device *sv;
236
237 d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d",
238 slvl_setup);
239
240 if (!d)
241 return NULL;
242
243 sv = d->priv;
244 d->ml_priv = sv;
245 sv->if_ptr = &sv->pppdev;
246 sv->pppdev.dev = d;
247 d->base_addr = iobase;
248 d->irq = irq;
249
250 return sv;
251}
252
253 197
254/* 198/*
255 * Allocate and setup Sealevel board. 199 * Allocate and setup Sealevel board.
256 */ 200 */
257 201
258static __init struct slvl_board *slvl_init(int iobase, int irq, 202static __init struct slvl_board *slvl_init(int iobase, int irq,
259 int txdma, int rxdma, int slow) 203 int txdma, int rxdma, int slow)
260{ 204{
261 struct z8530_dev *dev; 205 struct z8530_dev *dev;
262 struct slvl_board *b; 206 struct slvl_board *b;
263 207
264 /* 208 /*
265 * Get the needed I/O space 209 * Get the needed I/O space
266 */ 210 */
267 211
268 if(!request_region(iobase, 8, "Sealevel 4021")) 212 if (!request_region(iobase, 8, "Sealevel 4021")) {
269 { 213 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n",
270 printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase); 214 iobase);
271 return NULL; 215 return NULL;
272 } 216 }
273
274 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
275 if(!b)
276 goto fail3;
277 217
278 if (!(b->dev[0]= slvl_alloc(iobase, irq))) 218 b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL);
279 goto fail2; 219 if (!b)
220 goto err_kzalloc;
280 221
281 b->dev[0]->chan = &b->board.chanA; 222 b->dev[0].chan = &b->board.chanA;
282 b->dev[0]->channel = 0; 223 b->dev[0].channel = 0;
283
284 if (!(b->dev[1] = slvl_alloc(iobase, irq)))
285 goto fail1_0;
286 224
287 b->dev[1]->chan = &b->board.chanB; 225 b->dev[1].chan = &b->board.chanB;
288 b->dev[1]->channel = 1; 226 b->dev[1].channel = 1;
289 227
290 dev = &b->board; 228 dev = &b->board;
291 229
292 /* 230 /*
293 * Stuff in the I/O addressing 231 * Stuff in the I/O addressing
294 */ 232 */
295 233
296 dev->active = 0; 234 dev->active = 0;
297 235
298 b->iobase = iobase; 236 b->iobase = iobase;
299 237
300 /* 238 /*
301 * Select 8530 delays for the old board 239 * Select 8530 delays for the old board
302 */ 240 */
303 241
304 if(slow) 242 if (slow)
305 iobase |= Z8530_PORT_SLEEP; 243 iobase |= Z8530_PORT_SLEEP;
306 244
307 dev->chanA.ctrlio=iobase+1; 245 dev->chanA.ctrlio = iobase + 1;
308 dev->chanA.dataio=iobase; 246 dev->chanA.dataio = iobase;
309 dev->chanB.ctrlio=iobase+3; 247 dev->chanB.ctrlio = iobase + 3;
310 dev->chanB.dataio=iobase+2; 248 dev->chanB.dataio = iobase + 2;
311 249
312 dev->chanA.irqs=&z8530_nop; 250 dev->chanA.irqs = &z8530_nop;
313 dev->chanB.irqs=&z8530_nop; 251 dev->chanB.irqs = &z8530_nop;
314 252
315 /* 253 /*
316 * Assert DTR enable DMA 254 * Assert DTR enable DMA
317 */ 255 */
318 256
319 outb(3|(1<<7), b->iobase+4); 257 outb(3 | (1 << 7), b->iobase + 4);
320 258
321 259
322 /* We want a fast IRQ for this device. Actually we'd like an even faster 260 /* We want a fast IRQ for this device. Actually we'd like an even faster
323 IRQ ;) - This is one driver RtLinux is made for */ 261 IRQ ;) - This is one driver RtLinux is made for */
324 262
325 if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0) 263 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED,
326 { 264 "SeaLevel", dev) < 0) {
327 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); 265 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
328 goto fail1_1; 266 goto err_request_irq;
329 } 267 }
330 268
331 dev->irq=irq; 269 dev->irq = irq;
332 dev->chanA.private=&b->dev[0]; 270 dev->chanA.private = &b->dev[0];
333 dev->chanB.private=&b->dev[1]; 271 dev->chanB.private = &b->dev[1];
334 dev->chanA.netdevice=b->dev[0]->pppdev.dev; 272 dev->chanA.dev = dev;
335 dev->chanB.netdevice=b->dev[1]->pppdev.dev; 273 dev->chanB.dev = dev;
336 dev->chanA.dev=dev; 274
337 dev->chanB.dev=dev; 275 dev->chanA.txdma = 3;
338 276 dev->chanA.rxdma = 1;
339 dev->chanA.txdma=3; 277 if (request_dma(dev->chanA.txdma, "SeaLevel (TX)"))
340 dev->chanA.rxdma=1; 278 goto err_dma_tx;
341 if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0) 279
342 goto fail; 280 if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)"))
343 281 goto err_dma_rx;
344 if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0) 282
345 goto dmafail;
346
347 disable_irq(irq); 283 disable_irq(irq);
348 284
349 /* 285 /*
350 * Begin normal initialise 286 * Begin normal initialise
351 */ 287 */
352 288
353 if(z8530_init(dev)!=0) 289 if (z8530_init(dev) != 0) {
354 {
355 printk(KERN_ERR "Z8530 series device not found.\n"); 290 printk(KERN_ERR "Z8530 series device not found.\n");
356 enable_irq(irq); 291 enable_irq(irq);
357 goto dmafail2; 292 goto free_hw;
358 } 293 }
359 if(dev->type==Z85C30) 294 if (dev->type == Z85C30) {
360 {
361 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); 295 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
362 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); 296 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream);
363 } 297 } else {
364 else
365 {
366 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); 298 z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230);
367 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); 299 z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
368 } 300 }
@@ -370,36 +302,31 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
370 /* 302 /*
371 * Now we can take the IRQ 303 * Now we can take the IRQ
372 */ 304 */
373 305
374 enable_irq(irq); 306 enable_irq(irq);
375 307
376 if (register_netdev(b->dev[0]->pppdev.dev)) 308 if (slvl_setup(&b->dev[0], iobase, irq))
377 goto dmafail2; 309 goto free_hw;
378 310 if (slvl_setup(&b->dev[1], iobase, irq))
379 if (register_netdev(b->dev[1]->pppdev.dev)) 311 goto free_netdev0;
380 goto fail_unit;
381 312
382 z8530_describe(dev, "I/O", iobase); 313 z8530_describe(dev, "I/O", iobase);
383 dev->active=1; 314 dev->active = 1;
384 return b; 315 return b;
385 316
386fail_unit: 317free_netdev0:
387 unregister_netdev(b->dev[0]->pppdev.dev); 318 unregister_hdlc_device(b->dev[0].chan->netdevice);
388 319 free_netdev(b->dev[0].chan->netdevice);
389dmafail2: 320free_hw:
390 free_dma(dev->chanA.rxdma); 321 free_dma(dev->chanA.rxdma);
391dmafail: 322err_dma_rx:
392 free_dma(dev->chanA.txdma); 323 free_dma(dev->chanA.txdma);
393fail: 324err_dma_tx:
394 free_irq(irq, dev); 325 free_irq(irq, dev);
395fail1_1: 326err_request_irq:
396 free_netdev(b->dev[1]->pppdev.dev);
397fail1_0:
398 free_netdev(b->dev[0]->pppdev.dev);
399fail2:
400 kfree(b); 327 kfree(b);
401fail3: 328err_kzalloc:
402 release_region(iobase,8); 329 release_region(iobase, 8);
403 return NULL; 330 return NULL;
404} 331}
405 332
@@ -408,14 +335,14 @@ static void __exit slvl_shutdown(struct slvl_board *b)
408 int u; 335 int u;
409 336
410 z8530_shutdown(&b->board); 337 z8530_shutdown(&b->board);
411 338
412 for(u=0; u<2; u++) 339 for (u = 0; u < 2; u++)
413 { 340 {
414 struct net_device *d = b->dev[u]->pppdev.dev; 341 struct net_device *d = b->dev[u].chan->netdevice;
415 unregister_netdev(d); 342 unregister_hdlc_device(d);
416 free_netdev(d); 343 free_netdev(d);
417 } 344 }
418 345
419 free_irq(b->board.irq, &b->board); 346 free_irq(b->board.irq, &b->board);
420 free_dma(b->board.chanA.rxdma); 347 free_dma(b->board.chanA.rxdma);
421 free_dma(b->board.chanA.txdma); 348 free_dma(b->board.chanA.txdma);
@@ -451,10 +378,6 @@ static struct slvl_board *slvl_unit;
451 378
452static int __init slvl_init_module(void) 379static int __init slvl_init_module(void)
453{ 380{
454#ifdef MODULE
455 printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n");
456 printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n");
457#endif
458 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); 381 slvl_unit = slvl_init(io, irq, txdma, rxdma, slow);
459 382
460 return slvl_unit ? 0 : -ENODEV; 383 return slvl_unit ? 0 : -ENODEV;
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 98ef400908b8..243bd8d918fe 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -43,6 +43,7 @@
43#include <linux/netdevice.h> 43#include <linux/netdevice.h>
44#include <linux/if_arp.h> 44#include <linux/if_arp.h>
45#include <linux/delay.h> 45#include <linux/delay.h>
46#include <linux/hdlc.h>
46#include <linux/ioport.h> 47#include <linux/ioport.h>
47#include <linux/init.h> 48#include <linux/init.h>
48#include <asm/dma.h> 49#include <asm/dma.h>
@@ -51,7 +52,6 @@
51#define RT_UNLOCK 52#define RT_UNLOCK
52#include <linux/spinlock.h> 53#include <linux/spinlock.h>
53 54
54#include <net/syncppp.h>
55#include "z85230.h" 55#include "z85230.h"
56 56
57 57
@@ -440,51 +440,46 @@ static void z8530_tx(struct z8530_channel *c)
440 * A status event occurred in PIO synchronous mode. There are several 440 * A status event occurred in PIO synchronous mode. There are several
441 * reasons the chip will bother us here. A transmit underrun means we 441 * reasons the chip will bother us here. A transmit underrun means we
442 * failed to feed the chip fast enough and just broke a packet. A DCD 442 * failed to feed the chip fast enough and just broke a packet. A DCD
443 * change is a line up or down. We communicate that back to the protocol 443 * change is a line up or down.
444 * layer for synchronous PPP to renegotiate.
445 */ 444 */
446 445
447static void z8530_status(struct z8530_channel *chan) 446static void z8530_status(struct z8530_channel *chan)
448{ 447{
449 u8 status, altered; 448 u8 status, altered;
450 449
451 status=read_zsreg(chan, R0); 450 status = read_zsreg(chan, R0);
452 altered=chan->status^status; 451 altered = chan->status ^ status;
453 452
454 chan->status=status; 453 chan->status = status;
455 454
456 if(status&TxEOM) 455 if (status & TxEOM) {
457 {
458/* printk("%s: Tx underrun.\n", chan->dev->name); */ 456/* printk("%s: Tx underrun.\n", chan->dev->name); */
459 chan->stats.tx_fifo_errors++; 457 chan->netdevice->stats.tx_fifo_errors++;
460 write_zsctrl(chan, ERR_RES); 458 write_zsctrl(chan, ERR_RES);
461 z8530_tx_done(chan); 459 z8530_tx_done(chan);
462 } 460 }
463 461
464 if(altered&chan->dcdcheck) 462 if (altered & chan->dcdcheck)
465 { 463 {
466 if(status&chan->dcdcheck) 464 if (status & chan->dcdcheck) {
467 {
468 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 465 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
469 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 466 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
470 if(chan->netdevice && 467 if (chan->netdevice)
471 ((chan->netdevice->type == ARPHRD_HDLC) || 468 netif_carrier_on(chan->netdevice);
472 (chan->netdevice->type == ARPHRD_PPP))) 469 } else {
473 sppp_reopen(chan->netdevice);
474 }
475 else
476 {
477 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); 470 printk(KERN_INFO "%s: DCD lost\n", chan->dev->name);
478 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 471 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
479 z8530_flush_fifo(chan); 472 z8530_flush_fifo(chan);
473 if (chan->netdevice)
474 netif_carrier_off(chan->netdevice);
480 } 475 }
481 476
482 } 477 }
483 write_zsctrl(chan, RES_EXT_INT); 478 write_zsctrl(chan, RES_EXT_INT);
484 write_zsctrl(chan, RES_H_IUS); 479 write_zsctrl(chan, RES_H_IUS);
485} 480}
486 481
487struct z8530_irqhandler z8530_sync= 482struct z8530_irqhandler z8530_sync =
488{ 483{
489 z8530_rx, 484 z8530_rx,
490 z8530_tx, 485 z8530_tx,
@@ -556,8 +551,7 @@ static void z8530_dma_tx(struct z8530_channel *chan)
556 * 551 *
557 * A status event occurred on the Z8530. We receive these for two reasons 552 * A status event occurred on the Z8530. We receive these for two reasons
558 * when in DMA mode. Firstly if we finished a packet transfer we get one 553 * when in DMA mode. Firstly if we finished a packet transfer we get one
559 * and kick the next packet out. Secondly we may see a DCD change and 554 * and kick the next packet out. Secondly we may see a DCD change.
560 * have to poke the protocol layer.
561 * 555 *
562 */ 556 */
563 557
@@ -586,24 +580,21 @@ static void z8530_dma_status(struct z8530_channel *chan)
586 } 580 }
587 } 581 }
588 582
589 if(altered&chan->dcdcheck) 583 if (altered & chan->dcdcheck)
590 { 584 {
591 if(status&chan->dcdcheck) 585 if (status & chan->dcdcheck) {
592 {
593 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); 586 printk(KERN_INFO "%s: DCD raised\n", chan->dev->name);
594 write_zsreg(chan, R3, chan->regs[3]|RxENABLE); 587 write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
595 if(chan->netdevice && 588 if (chan->netdevice)
596 ((chan->netdevice->type == ARPHRD_HDLC) || 589 netif_carrier_on(chan->netdevice);
597 (chan->netdevice->type == ARPHRD_PPP))) 590 } else {
598 sppp_reopen(chan->netdevice);
599 }
600 else
601 {
602 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); 591 printk(KERN_INFO "%s:DCD lost\n", chan->dev->name);
603 write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); 592 write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
604 z8530_flush_fifo(chan); 593 z8530_flush_fifo(chan);
594 if (chan->netdevice)
595 netif_carrier_off(chan->netdevice);
605 } 596 }
606 } 597 }
607 598
608 write_zsctrl(chan, RES_EXT_INT); 599 write_zsctrl(chan, RES_EXT_INT);
609 write_zsctrl(chan, RES_H_IUS); 600 write_zsctrl(chan, RES_H_IUS);
@@ -1459,10 +1450,10 @@ static void z8530_tx_begin(struct z8530_channel *c)
1459 /* 1450 /*
1460 * Check if we crapped out. 1451 * Check if we crapped out.
1461 */ 1452 */
1462 if(get_dma_residue(c->txdma)) 1453 if (get_dma_residue(c->txdma))
1463 { 1454 {
1464 c->stats.tx_dropped++; 1455 c->netdevice->stats.tx_dropped++;
1465 c->stats.tx_fifo_errors++; 1456 c->netdevice->stats.tx_fifo_errors++;
1466 } 1457 }
1467 release_dma_lock(flags); 1458 release_dma_lock(flags);
1468 } 1459 }
@@ -1534,21 +1525,21 @@ static void z8530_tx_begin(struct z8530_channel *c)
1534 * packet. This code is fairly timing sensitive. 1525 * packet. This code is fairly timing sensitive.
1535 * 1526 *
1536 * Called with the register lock held. 1527 * Called with the register lock held.
1537 */ 1528 */
1538 1529
1539static void z8530_tx_done(struct z8530_channel *c) 1530static void z8530_tx_done(struct z8530_channel *c)
1540{ 1531{
1541 struct sk_buff *skb; 1532 struct sk_buff *skb;
1542 1533
1543 /* Actually this can happen.*/ 1534 /* Actually this can happen.*/
1544 if(c->tx_skb==NULL) 1535 if (c->tx_skb == NULL)
1545 return; 1536 return;
1546 1537
1547 skb=c->tx_skb; 1538 skb = c->tx_skb;
1548 c->tx_skb=NULL; 1539 c->tx_skb = NULL;
1549 z8530_tx_begin(c); 1540 z8530_tx_begin(c);
1550 c->stats.tx_packets++; 1541 c->netdevice->stats.tx_packets++;
1551 c->stats.tx_bytes+=skb->len; 1542 c->netdevice->stats.tx_bytes += skb->len;
1552 dev_kfree_skb_irq(skb); 1543 dev_kfree_skb_irq(skb);
1553} 1544}
1554 1545
@@ -1558,7 +1549,7 @@ static void z8530_tx_done(struct z8530_channel *c)
1558 * @skb: The buffer 1549 * @skb: The buffer
1559 * 1550 *
1560 * We point the receive handler at this function when idle. Instead 1551 * We point the receive handler at this function when idle. Instead
1561 * of syncppp processing the frames we get to throw them away. 1552 * of processing the frames we get to throw them away.
1562 */ 1553 */
1563 1554
1564void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) 1555void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
@@ -1635,10 +1626,11 @@ static void z8530_rx_done(struct z8530_channel *c)
1635 else 1626 else
1636 /* Can't occur as we dont reenable the DMA irq until 1627 /* Can't occur as we dont reenable the DMA irq until
1637 after the flip is done */ 1628 after the flip is done */
1638 printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name); 1629 printk(KERN_WARNING "%s: DMA flip overrun!\n",
1639 1630 c->netdevice->name);
1631
1640 release_dma_lock(flags); 1632 release_dma_lock(flags);
1641 1633
1642 /* 1634 /*
1643 * Shove the old buffer into an sk_buff. We can't DMA 1635 * Shove the old buffer into an sk_buff. We can't DMA
1644 * directly into one on a PC - it might be above the 16Mb 1636 * directly into one on a PC - it might be above the 16Mb
@@ -1646,27 +1638,23 @@ static void z8530_rx_done(struct z8530_channel *c)
1646 * can avoid the copy. Optimisation 2 - make the memcpy 1638 * can avoid the copy. Optimisation 2 - make the memcpy
1647 * a copychecksum. 1639 * a copychecksum.
1648 */ 1640 */
1649 1641
1650 skb=dev_alloc_skb(ct); 1642 skb = dev_alloc_skb(ct);
1651 if(skb==NULL) 1643 if (skb == NULL) {
1652 { 1644 c->netdevice->stats.rx_dropped++;
1653 c->stats.rx_dropped++; 1645 printk(KERN_WARNING "%s: Memory squeeze.\n",
1654 printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name); 1646 c->netdevice->name);
1655 } 1647 } else {
1656 else
1657 {
1658 skb_put(skb, ct); 1648 skb_put(skb, ct);
1659 skb_copy_to_linear_data(skb, rxb, ct); 1649 skb_copy_to_linear_data(skb, rxb, ct);
1660 c->stats.rx_packets++; 1650 c->netdevice->stats.rx_packets++;
1661 c->stats.rx_bytes+=ct; 1651 c->netdevice->stats.rx_bytes += ct;
1662 } 1652 }
1663 c->dma_ready=1; 1653 c->dma_ready = 1;
1664 } 1654 } else {
1665 else 1655 RT_LOCK;
1666 { 1656 skb = c->skb;
1667 RT_LOCK; 1657
1668 skb=c->skb;
1669
1670 /* 1658 /*
1671 * The game we play for non DMA is similar. We want to 1659 * The game we play for non DMA is similar. We want to
1672 * get the controller set up for the next packet as fast 1660 * get the controller set up for the next packet as fast
@@ -1677,48 +1665,39 @@ static void z8530_rx_done(struct z8530_channel *c)
1677 * if you build a system where the sync irq isnt blocked 1665 * if you build a system where the sync irq isnt blocked
1678 * by the kernel IRQ disable then you need only block the 1666 * by the kernel IRQ disable then you need only block the
1679 * sync IRQ for the RT_LOCK area. 1667 * sync IRQ for the RT_LOCK area.
1680 * 1668 *
1681 */ 1669 */
1682 ct=c->count; 1670 ct=c->count;
1683 1671
1684 c->skb = c->skb2; 1672 c->skb = c->skb2;
1685 c->count = 0; 1673 c->count = 0;
1686 c->max = c->mtu; 1674 c->max = c->mtu;
1687 if(c->skb) 1675 if (c->skb) {
1688 {
1689 c->dptr = c->skb->data; 1676 c->dptr = c->skb->data;
1690 c->max = c->mtu; 1677 c->max = c->mtu;
1691 } 1678 } else {
1692 else 1679 c->count = 0;
1693 {
1694 c->count= 0;
1695 c->max = 0; 1680 c->max = 0;
1696 } 1681 }
1697 RT_UNLOCK; 1682 RT_UNLOCK;
1698 1683
1699 c->skb2 = dev_alloc_skb(c->mtu); 1684 c->skb2 = dev_alloc_skb(c->mtu);
1700 if(c->skb2==NULL) 1685 if (c->skb2 == NULL)
1701 printk(KERN_WARNING "%s: memory squeeze.\n", 1686 printk(KERN_WARNING "%s: memory squeeze.\n",
1702 c->netdevice->name); 1687 c->netdevice->name);
1703 else 1688 else
1704 { 1689 skb_put(c->skb2, c->mtu);
1705 skb_put(c->skb2,c->mtu); 1690 c->netdevice->stats.rx_packets++;
1706 } 1691 c->netdevice->stats.rx_bytes += ct;
1707 c->stats.rx_packets++;
1708 c->stats.rx_bytes+=ct;
1709
1710 } 1692 }
1711 /* 1693 /*
1712 * If we received a frame we must now process it. 1694 * If we received a frame we must now process it.
1713 */ 1695 */
1714 if(skb) 1696 if (skb) {
1715 {
1716 skb_trim(skb, ct); 1697 skb_trim(skb, ct);
1717 c->rx_function(c,skb); 1698 c->rx_function(c, skb);
1718 } 1699 } else {
1719 else 1700 c->netdevice->stats.rx_dropped++;
1720 {
1721 c->stats.rx_dropped++;
1722 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); 1701 printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name);
1723 } 1702 }
1724} 1703}
@@ -1730,7 +1709,7 @@ static void z8530_rx_done(struct z8530_channel *c)
1730 * Returns true if the buffer cross a DMA boundary on a PC. The poor 1709 * Returns true if the buffer cross a DMA boundary on a PC. The poor
1731 * thing can only DMA within a 64K block not across the edges of it. 1710 * thing can only DMA within a 64K block not across the edges of it.
1732 */ 1711 */
1733 1712
1734static inline int spans_boundary(struct sk_buff *skb) 1713static inline int spans_boundary(struct sk_buff *skb)
1735{ 1714{
1736 unsigned long a=(unsigned long)skb->data; 1715 unsigned long a=(unsigned long)skb->data;
@@ -1799,24 +1778,6 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
1799 1778
1800EXPORT_SYMBOL(z8530_queue_xmit); 1779EXPORT_SYMBOL(z8530_queue_xmit);
1801 1780
1802/**
1803 * z8530_get_stats - Get network statistics
1804 * @c: The channel to use
1805 *
1806 * Get the statistics block. We keep the statistics in software as
1807 * the chip doesn't do it for us.
1808 *
1809 * Locking is ignored here - we could lock for a copy but its
1810 * not likely to be that big an issue
1811 */
1812
1813struct net_device_stats *z8530_get_stats(struct z8530_channel *c)
1814{
1815 return &c->stats;
1816}
1817
1818EXPORT_SYMBOL(z8530_get_stats);
1819
1820/* 1781/*
1821 * Module support 1782 * Module support
1822 */ 1783 */
diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h
index 158aea7b8eac..4f372396c512 100644
--- a/drivers/net/wan/z85230.h
+++ b/drivers/net/wan/z85230.h
@@ -325,7 +325,6 @@ struct z8530_channel
325 325
326 void *private; /* For our owner */ 326 void *private; /* For our owner */
327 struct net_device *netdevice; /* Network layer device */ 327 struct net_device *netdevice; /* Network layer device */
328 struct net_device_stats stats; /* Network layer statistics */
329 328
330 /* 329 /*
331 * Async features 330 * Async features
@@ -366,13 +365,13 @@ struct z8530_channel
366 unsigned char tx_active; /* character is being xmitted */ 365 unsigned char tx_active; /* character is being xmitted */
367 unsigned char tx_stopped; /* output is suspended */ 366 unsigned char tx_stopped; /* output is suspended */
368 367
369 spinlock_t *lock; /* Devicr lock */ 368 spinlock_t *lock; /* Device lock */
370}; 369};
371 370
372/* 371/*
373 * Each Z853x0 device. 372 * Each Z853x0 device.
374 */ 373 */
375 374
376struct z8530_dev 375struct z8530_dev
377{ 376{
378 char *name; /* Device instance name */ 377 char *name; /* Device instance name */
@@ -408,7 +407,6 @@ extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
408extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); 407extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
409extern int z8530_channel_load(struct z8530_channel *, u8 *); 408extern int z8530_channel_load(struct z8530_channel *, u8 *);
410extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); 409extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
411extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c);
412extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); 410extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
413 411
414 412