author	Krzysztof Hałasa <khc@pm.waw.pl>	2008-03-24 11:39:02 -0400
committer	Krzysztof Hałasa <khc@pm.waw.pl>	2008-11-21 20:49:47 -0500
commit	6b40aba304e6f94c747ad9559e03ea03a49e8008 (patch)
tree	9f5adb7f2c5dee73a9d86fa99ef408300ba50423 /drivers/net/wan/hd64570.c
parent	e245a3855eec42127b722ed9688a49ec3f3c9a27 (diff)
WAN: split hd6457x.c into hd64570.c and hd64572.c

Supporting both original SCA and SCA-II in one file was nice at some
point but now it's increasingly painful.

Signed-off-by: Krzysztof Hałasa <khc@pm.waw.pl>
Diffstat (limited to 'drivers/net/wan/hd64570.c')
-rw-r--r--	drivers/net/wan/hd64570.c	867
1 file changed, 867 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
new file mode 100644
index 000000000000..434583a94b32
--- /dev/null
+++ b/drivers/net/wan/hd64570.c
@@ -0,0 +1,867 @@
/*
 * Hitachi SCA HD64570 and HD64572 common driver for Linux
 *
 * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Sources of information:
 *    Hitachi HD64570 SCA User's Manual
 *    Hitachi HD64572 SCA-II User's Manual
 *
 * We use the following SCA memory map:
 *
 * Packet buffer descriptor rings - starting from winbase or win0base:
 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
 *
 * Packet data buffers - starting from winbase + buff_offset:
 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used)
 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used)
 */
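
/*
 * Worked example of the map above (the ring sizes are purely
 * illustrative - the real values come from the card driver): with
 * rx_ring_buffers = 16 and tx_ring_buffers = 16, TX descriptor #3 of
 * logical channel #1 lands in absolute slot
 * 1 * (16 + 16) + 1 * 16 + 3 = 51, i.e. at offset
 * 51 * sizeof(pkt_desc) from winbase (see desc_abs_number() and
 * desc_offset() below), and its packet data buffer lives at
 * buff_offset + 51 * HDLC_MAX_MRU (see buffer_offset()).
 */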

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include <linux/hdlc.h>

#if (!defined (__HD64570_H) && !defined (__HD64572_H)) || \
    (defined (__HD64570_H) && defined (__HD64572_H))
#error Exactly one of hd64570.h and hd64572.h must be included
#endif

#define get_msci(port)		(phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
#define get_dmac_rx(port)	(phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
#define get_dmac_tx(port)	(phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)

#define SCA_INTR_MSCI(node)	(node ? 0x10 : 0x01)
#define SCA_INTR_DMAC_RX(node)	(node ? 0x20 : 0x02)
#define SCA_INTR_DMAC_TX(node)	(node ? 0x40 : 0x04)

#ifdef __HD64570_H /* HD64570 */
#define sca_outa(value, reg, card)	sca_outw(value, reg, card)
#define sca_ina(reg, card)		sca_inw(reg, card)
#define writea(value, ptr)		writew(value, ptr)

#else /* HD64572 */
#define sca_outa(value, reg, card)	sca_outl(value, reg, card)
#define sca_ina(reg, card)		sca_inl(reg, card)
#define writea(value, ptr)		writel(value, ptr)
#endif

static inline struct net_device *port_to_dev(port_t *port)
{
	return port->dev;
}

static inline int sca_intr_status(card_t *card)
{
	u8 result = 0;

#ifdef __HD64570_H /* HD64570 */
	u8 isr0 = sca_in(ISR0, card);
	u8 isr1 = sca_in(ISR1, card);

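	/*
	 * Bit layout implied by the masks below (a reading of the masks,
	 * not a quote from the manual): in ISR1 each DMA channel owns two
	 * bits - DMAC0 RX/TX in bits 0-1/2-3, DMAC1 RX/TX in bits 4-5/6-7;
	 * in ISR0 each MSCI channel owns one nibble.
	 */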
	if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
	if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
	if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
	if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
	if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
	if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);

#else /* HD64572 */
	u32 isr0 = sca_inl(ISR0, card);

	if (isr0 & 0x0000000F) result |= SCA_INTR_DMAC_RX(0);
	if (isr0 & 0x000000F0) result |= SCA_INTR_DMAC_TX(0);
	if (isr0 & 0x00000F00) result |= SCA_INTR_DMAC_RX(1);
	if (isr0 & 0x0000F000) result |= SCA_INTR_DMAC_TX(1);
	if (isr0 & 0x003E0000) result |= SCA_INTR_MSCI(0);
	if (isr0 & 0x3E000000) result |= SCA_INTR_MSCI(1);

#endif /* HD64570 vs HD64572 */

	if (!(result & SCA_INTR_DMAC_TX(0)))
		if (sca_in(DSR_TX(0), card) & DSR_EOM)
			result |= SCA_INTR_DMAC_TX(0);
	if (!(result & SCA_INTR_DMAC_TX(1)))
		if (sca_in(DSR_TX(1), card) & DSR_EOM)
			result |= SCA_INTR_DMAC_TX(1);

	return result;
}

static inline port_t *dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}

static inline u16 next_desc(port_t *port, u16 desc, int transmit)
{
	return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
			     : port_to_card(port)->rx_ring_buffers);
}


static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
	u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
	u16 tx_buffs = port_to_card(port)->tx_ring_buffers;

	desc %= (transmit ? tx_buffs : rx_buffs); /* called with "X + 1" etc. */
	return log_node(port) * (rx_buffs + tx_buffs) +
		transmit * rx_buffs + desc;
}


static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
	/* Descriptor offset always fits in 16 bits */
	return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}


static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
					     int transmit)
{
#ifdef PAGE0_ALWAYS_MAPPED
	return (pkt_desc __iomem *)(win0base(port_to_card(port))
				    + desc_offset(port, desc, transmit));
#else
	return (pkt_desc __iomem *)(winbase(port_to_card(port))
				    + desc_offset(port, desc, transmit));
#endif
}


static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
	return port_to_card(port)->buff_offset +
		desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}

static inline void sca_set_carrier(port_t *port)
{
	if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
#ifdef DEBUG_LINK
		printk(KERN_DEBUG "%s: sca_set_carrier on\n",
		       port_to_dev(port)->name);
#endif
		netif_carrier_on(port_to_dev(port));
	} else {
#ifdef DEBUG_LINK
		printk(KERN_DEBUG "%s: sca_set_carrier off\n",
		       port_to_dev(port)->name);
#endif
		netif_carrier_off(port_to_dev(port));
	}
}

static void sca_init_sync_port(port_t *port)
{
	card_t *card = port_to_card(port);
	int transmit, i;

	port->rxin = 0;
	port->txin = 0;
	port->txlast = 0;

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
	openwin(card, 0);
#endif

	for (transmit = 0; transmit < 2; transmit++) {
		u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
		u16 buffs = transmit ? card->tx_ring_buffers
			: card->rx_ring_buffers;

		for (i = 0; i < buffs; i++) {
			pkt_desc __iomem *desc = desc_address(port, i, transmit);
			u16 chain_off = desc_offset(port, i + 1, transmit);
			u32 buff_off = buffer_offset(port, i, transmit);

			writea(chain_off, &desc->cp);
			writel(buff_off, &desc->bp);
			writew(0, &desc->len);
			writeb(0, &desc->stat);
		}

		/* DMA disable - to halt state */
		sca_out(0, transmit ? DSR_TX(phy_node(port)) :
			DSR_RX(phy_node(port)), card);
		/* software ABORT - to initial state */
		sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
			DCR_RX(phy_node(port)), card);

#ifdef __HD64570_H
		sca_out(0, dmac + CPB, card); /* pointer base */
#endif
		/* current desc addr */
		sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card);
		if (!transmit)
			sca_outa(desc_offset(port, buffs - 1, transmit),
				 dmac + EDAL, card);
		else
			sca_outa(desc_offset(port, 0, transmit), dmac + EDAL,
				 card);

		/* clear frame end interrupt counter */
		sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
			DCR_RX(phy_node(port)), card);

		if (!transmit) { /* Receive */
			/* set buffer length */
			sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
			/* Chain mode, Multi-frame */
			sca_out(0x14, DMR_RX(phy_node(port)), card);
			sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
				card);
			/* DMA enable */
			sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
		} else { /* Transmit */
			/* Chain mode, Multi-frame */
			sca_out(0x14, DMR_TX(phy_node(port)), card);
			/* enable underflow interrupts */
			sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
		}
	}
	sca_set_carrier(port);
}


#ifdef NEED_SCA_MSCI_INTR
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
	u16 msci = get_msci(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

	/* Reset MSCI TX underrun and CDCD status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

	if (stat & ST1_UDRN) {
		/* TX Underrun error detected */
		port_to_dev(port)->stats.tx_errors++;
		port_to_dev(port)->stats.tx_fifo_errors++;
	}

	if (stat & ST1_CDCD)
		sca_set_carrier(port);
}
#endif


static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
			  u16 rxin)
{
	struct net_device *dev = port_to_dev(port);
	struct sk_buff *skb;
	u16 len;
	u32 buff;
#ifndef ALL_PAGES_ALWAYS_MAPPED
	u32 maxlen;
	u8 page;
#endif

	len = readw(&desc->len);
	skb = dev_alloc_skb(len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	buff = buffer_offset(port, rxin, 0);
#ifndef ALL_PAGES_ALWAYS_MAPPED
	page = buff / winsize(card);
	buff = buff % winsize(card);
	maxlen = winsize(card) - buff;

	openwin(card, page);

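	/* A received frame may straddle a window boundary: copy what fits
	   in the current window, then map the next window for the rest */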
	if (len > maxlen) {
		memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
		openwin(card, page + 1);
		memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
	} else
#endif
		memcpy_fromio(skb->data, winbase(card) + buff, len);

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
	/* select pkt_desc table page back */
	openwin(card, 0);
#endif
	skb_put(skb, len);
#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->protocol = hdlc_type_trans(skb, dev);
	netif_rx(skb);
}


/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_rx(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_RX(phy_node(port)), card);

	if (stat & DSR_BOF)
		/* Dropped one or more frames */
		dev->stats.rx_over_errors++;

	while (1) {
		u32 desc_off = desc_offset(port, port->rxin, 0);
		pkt_desc __iomem *desc;
		u32 cda = sca_ina(dmac + CDAL, card);

		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* No frame received */

		desc = desc_address(port, port->rxin, 0);
		stat = readb(&desc->stat);
		if (!(stat & ST_RX_EOM))
			port->rxpart = 1; /* partial frame received */
		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
			dev->stats.rx_errors++;
			if (stat & ST_RX_OVERRUN)
				dev->stats.rx_fifo_errors++;
			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
					  ST_RX_RESBIT)) || port->rxpart)
				dev->stats.rx_frame_errors++;
			else if (stat & ST_RX_CRC)
				dev->stats.rx_crc_errors++;
			if (stat & ST_RX_EOM)
				port->rxpart = 0; /* received last fragment */
		} else
			sca_rx(card, port, desc, port->rxin);

		/* Set new error descriptor address */
		sca_outa(desc_off, dmac + EDAL, card);
		port->rxin = next_desc(port, port->rxin, 0);
	}

	/* make sure RX DMA is enabled */
	sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}


/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_tx(port);
	card_t *card = port_to_card(port);
	u8 stat;

	spin_lock(&port->lock);

	stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_TX(phy_node(port)), card);

	while (1) {
		pkt_desc __iomem *desc;

		u32 desc_off = desc_offset(port, port->txlast, 1);
		u32 cda = sca_ina(dmac + CDAL, card);
		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* Transmitter is/will_be sending this frame */

		desc = desc_address(port, port->txlast, 1);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += readw(&desc->len);
		writeb(0, &desc->stat);	/* Free descriptor */
		port->txlast = next_desc(port, port->txlast, 1);
	}

	netif_wake_queue(dev);
	spin_unlock(&port->lock);
}


static irqreturn_t sca_intr(int irq, void *dev_id)
{
	card_t *card = dev_id;
	int i;
	u8 stat;
	int handled = 0;

#ifndef ALL_PAGES_ALWAYS_MAPPED
	u8 page = sca_get_page(card);
#endif

	while ((stat = sca_intr_status(card)) != 0) {
		handled = 1;
		for (i = 0; i < 2; i++) {
			port_t *port = get_port(card, i);
			if (port) {
				if (stat & SCA_INTR_MSCI(i))
					sca_msci_intr(port);

				if (stat & SCA_INTR_DMAC_RX(i))
					sca_rx_intr(port);

				if (stat & SCA_INTR_DMAC_TX(i))
					sca_tx_intr(port);
			}
		}
	}

#ifndef ALL_PAGES_ALWAYS_MAPPED
	openwin(card, page); /* Restore original page */
#endif
	return IRQ_RETVAL(handled);
}


static void sca_set_port(port_t *port)
{
	card_t *card = port_to_card(port);
	u16 msci = get_msci(port);
	u8 md2 = sca_in(msci + MD2, card);
	unsigned int tmc, br = 10, brv = 1024;

	if (port->settings.clock_rate > 0) {
		/* Try lower br for better accuracy */
		do {
			br--;
			brv >>= 1; /* brv = 2^9 = 512 max in specs */

			/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
			tmc = CLOCK_BASE / brv / port->settings.clock_rate;
		} while (br > 1 && tmc <= 128);

		if (tmc < 1) {
			tmc = 1;
			br = 0;	/* For baud=CLOCK_BASE we use tmc=1 br=0 */
			brv = 1;
		} else if (tmc > 255)
			tmc = 256; /* tmc=0 means 256 - low baud rates */

		port->settings.clock_rate = CLOCK_BASE / brv / tmc;
	} else {
		br = 9;	/* Minimum clock rate */
		tmc = 256; /* 8bit = 0 */
		port->settings.clock_rate = CLOCK_BASE / (256 * 512);
	}

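	/* Worked example (CLOCK_BASE is card-specific; 9830400 Hz is
	 * assumed here purely for illustration): for clock_rate = 9600
	 * the loop above settles on br = 2 (brv = 4) with tmc = 256
	 * (written to the 8-bit register as 0), and
	 * 9830400 / 4 / 256 gives back exactly 9600 bps. */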
	port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
	port->txs = (port->txs & ~CLK_BRG_MASK) | br;
	port->tmc = tmc;

	/* baud divisor - time constant */
#ifdef __HD64570_H
	sca_out(port->tmc, msci + TMC, card);
#else
	sca_out(port->tmc, msci + TMCR, card);
	sca_out(port->tmc, msci + TMCT, card);
#endif

	/* Set BRG bits */
	sca_out(port->rxs, msci + RXS, card);
	sca_out(port->txs, msci + TXS, card);

	if (port->settings.loopback)
		md2 |= MD2_LOOPBACK;
	else
		md2 &= ~MD2_LOOPBACK;

	sca_out(md2, msci + MD2, card);
}


static void sca_open(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port_to_card(port);
	u16 msci = get_msci(port);
	u8 md0, md2;

	switch (port->encoding) {
	case ENCODING_NRZ:	md2 = MD2_NRZ;		break;
	case ENCODING_NRZI:	md2 = MD2_NRZI;		break;
	case ENCODING_FM_MARK:	md2 = MD2_FM_MARK;	break;
	case ENCODING_FM_SPACE:	md2 = MD2_FM_SPACE;	break;
	default:		md2 = MD2_MANCHESTER;
	}

	if (port->settings.loopback)
		md2 |= MD2_LOOPBACK;

	switch (port->parity) {
	case PARITY_CRC16_PR0:	     md0 = MD0_HDLC | MD0_CRC_16_0;  break;
	case PARITY_CRC16_PR1:	     md0 = MD0_HDLC | MD0_CRC_16;    break;
#ifdef __HD64570_H
	case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
#else
	case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
#endif
	case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU;   break;
	default:		     md0 = MD0_HDLC | MD0_CRC_NONE;
	}

	sca_out(CMD_RESET, msci + CMD, card);
	sca_out(md0, msci + MD0, card);
	sca_out(0x00, msci + MD1, card); /* no address field check */
	sca_out(md2, msci + MD2, card);
	sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
#ifdef __HD64570_H
	sca_out(CTL_IDLE, msci + CTL, card);
#else
	/* Skip the rest of underrun frame */
	sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
#endif

#ifdef __HD64570_H
	/* Allow at least 8 bytes before requesting RX DMA operation */
	/* TX with higher priority and possibly with shorter transfers */
	sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition */
	sca_out(0x10, msci + TRC0, card); /* =TXRDY/DMA activation condition */
	sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactivation condition */
#else
	sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
	sca_out(0x3C, msci + TFS, card); /* +1=TX start */
	sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activation condition */
	sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
	sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition */
#endif

	/* We're using the following interrupts:
	   - TXINT (DMAC completed all transmissions, underrun or DCD change)
	   - all DMA interrupts
	*/

	sca_set_carrier(port);

#ifdef __HD64570_H
	/* MSCI TX INT and RX INT A IRQ enable */
	sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
	sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
	sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C),
		IER0, card); /* TXINT and RXINT */
	/* enable DMA IRQ */
	sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
		IER1, card);
#else
	/* MSCI TXINT and RXINTA interrupt enable */
	sca_outl(IE0_TXINT | IE0_RXINTA | IE0_UDRN | IE0_CDCD, msci + IE0,
		 card);
	/* DMA & MSCI IRQ enable */
	sca_outl(sca_inl(IER0, card) |
		 (phy_node(port) ? 0x0A006600 : 0x000A0066), IER0, card);
#endif

#ifdef __HD64570_H
	sca_out(port->tmc, msci + TMC, card); /* Restore registers */
#else
	sca_out(port->tmc, msci + TMCR, card);
	sca_out(port->tmc, msci + TMCT, card);
#endif
	sca_out(port->rxs, msci + RXS, card);
	sca_out(port->txs, msci + TXS, card);
	sca_out(CMD_TX_ENABLE, msci + CMD, card);
	sca_out(CMD_RX_ENABLE, msci + CMD, card);

	netif_start_queue(dev);
}


static void sca_close(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port_to_card(port);

	/* reset channel */
	sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
#ifdef __HD64570_H
	/* disable MSCI interrupts */
	sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
		IER0, card);
	/* disable DMA interrupts */
	sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
		IER1, card);
#else
	/* disable DMA & MSCI IRQ */
	sca_outl(sca_inl(IER0, card) &
		 (phy_node(port) ? 0x00FF00FF : 0xFF00FF00), IER0, card);
#endif
	netif_stop_queue(dev);
}


static int sca_attach(struct net_device *dev, unsigned short encoding,
		      unsigned short parity)
{
	if (encoding != ENCODING_NRZ &&
	    encoding != ENCODING_NRZI &&
	    encoding != ENCODING_FM_MARK &&
	    encoding != ENCODING_FM_SPACE &&
	    encoding != ENCODING_MANCHESTER)
		return -EINVAL;

	if (parity != PARITY_NONE &&
	    parity != PARITY_CRC16_PR0 &&
	    parity != PARITY_CRC16_PR1 &&
#ifdef __HD64570_H
	    parity != PARITY_CRC16_PR0_CCITT &&
#else
	    parity != PARITY_CRC32_PR1_CCITT &&
#endif
	    parity != PARITY_CRC16_PR1_CCITT)
		return -EINVAL;

	dev_to_port(dev)->encoding = encoding;
	dev_to_port(dev)->parity = parity;
	return 0;
}


#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port_to_card(port);
	u16 cnt;
#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
	u8 page;
#endif

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
	page = sca_get_page(card);
	openwin(card, 0);
#endif

	printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
	       sca_ina(get_dmac_rx(port) + CDAL, card),
	       sca_ina(get_dmac_rx(port) + EDAL, card),
	       sca_in(DSR_RX(phy_node(port)), card), port->rxin,
	       sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
	for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
		printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));

	printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
	       "last=%u %sactive",
	       sca_ina(get_dmac_tx(port) + CDAL, card),
	       sca_ina(get_dmac_tx(port) + EDAL, card),
	       sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
	       sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");

	for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
		printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
	printk("\n");

	printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, "
	       "ST: %02x %02x %02x %02x"
#ifdef __HD64572_H
	       " %02x"
#endif
	       ", FST: %02x CST: %02x %02x\n",
	       sca_in(get_msci(port) + MD0, card),
	       sca_in(get_msci(port) + MD1, card),
	       sca_in(get_msci(port) + MD2, card),
	       sca_in(get_msci(port) + ST0, card),
	       sca_in(get_msci(port) + ST1, card),
	       sca_in(get_msci(port) + ST2, card),
	       sca_in(get_msci(port) + ST3, card),
#ifdef __HD64572_H
	       sca_in(get_msci(port) + ST4, card),
#endif
	       sca_in(get_msci(port) + FST, card),
	       sca_in(get_msci(port) + CST0, card),
	       sca_in(get_msci(port) + CST1, card));

#ifdef __HD64572_H
	printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
	       sca_inl(ISR0, card), sca_inl(ISR1, card));
#else
	printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
	       sca_in(ISR1, card), sca_in(ISR2, card));
#endif

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
	openwin(card, page); /* Restore original page */
#endif
}
#endif /* DEBUG_RINGS */


static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	card_t *card = port_to_card(port);
	pkt_desc __iomem *desc;
	u32 buff, len;
#ifndef ALL_PAGES_ALWAYS_MAPPED
	u8 page;
	u32 maxlen;
#endif

	spin_lock_irq(&port->lock);

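	/* One descriptor is always kept unused as a gap between the CPU
	 * and the DMAC (the checks below test txin + 1) - the usual
	 * ring-buffer rationale: with the gap, a completely full ring
	 * can never look identical to an empty one. */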
	desc = desc_address(port, port->txin + 1, 1);
	if (readb(&desc->stat)) { /* allow 1 packet gap */
		/* should never happen - previous xmit should stop queue */
#ifdef DEBUG_PKT
		printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
		netif_stop_queue(dev);
		spin_unlock_irq(&port->lock);
		return 1;	/* request packet to be queued */
	}

#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif

	desc = desc_address(port, port->txin, 1);
	buff = buffer_offset(port, port->txin, 1);
	len = skb->len;
#ifndef ALL_PAGES_ALWAYS_MAPPED
	page = buff / winsize(card);
	buff = buff % winsize(card);
	maxlen = winsize(card) - buff;

	openwin(card, page);
	if (len > maxlen) {
		memcpy_toio(winbase(card) + buff, skb->data, maxlen);
		openwin(card, page + 1);
		memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
	} else
#endif
		memcpy_toio(winbase(card) + buff, skb->data, len);

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
	openwin(card, 0); /* select pkt_desc table page back */
#endif
	writew(len, &desc->len);
	writeb(ST_TX_EOM, &desc->stat);
	dev->trans_start = jiffies;

	port->txin = next_desc(port, port->txin, 1);
	sca_outa(desc_offset(port, port->txin, 1),
		 get_dmac_tx(port) + EDAL, card);

	sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */

	desc = desc_address(port, port->txin + 1, 1);
	if (readb(&desc->stat)) /* allow 1 packet gap */
		netif_stop_queue(dev);

	spin_unlock_irq(&port->lock);

	dev_kfree_skb(skb);
	return 0;
}


#ifdef NEED_DETECT_RAM
static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
				    u32 ramsize)
{
	/* Round RAM size to 32 bits, fill from end to start */
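	/* Each dword is tagged with a value derived from its own address.
	 * Writing from the top down means that if the memory aliases
	 * (wraps), lower addresses overwrite their aliased copies, so the
	 * first mismatch on readback from address 0 upward marks the
	 * amount of distinct RAM. */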
	u32 i = ramsize &= ~3;

#ifndef ALL_PAGES_ALWAYS_MAPPED
	u32 size = winsize(card);

	openwin(card, (i - 4) / size); /* select last window */
#endif
	do {
		i -= 4;
#ifndef ALL_PAGES_ALWAYS_MAPPED
		if ((i + 4) % size == 0)
			openwin(card, i / size);
		writel(i ^ 0x12345678, rambase + i % size);
#else
		writel(i ^ 0x12345678, rambase + i);
#endif
	} while (i > 0);

	for (i = 0; i < ramsize; i += 4) {
#ifndef ALL_PAGES_ALWAYS_MAPPED
		if (i % size == 0)
			openwin(card, i / size);

		if (readl(rambase + i % size) != (i ^ 0x12345678))
			break;
#else
		if (readl(rambase + i) != (i ^ 0x12345678))
			break;
#endif
	}

	return i;
}
#endif /* NEED_DETECT_RAM */


static void __devinit sca_init(card_t *card, int wait_states)
{
	sca_out(wait_states, WCRL, card); /* Wait Control */
	sca_out(wait_states, WCRM, card);
	sca_out(wait_states, WCRH, card);

	sca_out(0, DMER, card);	/* DMA Master disable */
	sca_out(0x03, PCR, card); /* DMA priority */
	sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
	sca_out(0, DSR_TX(0), card);
	sca_out(0, DSR_RX(1), card);
	sca_out(0, DSR_TX(1), card);
	sca_out(DMER_DME, DMER, card); /* DMA Master enable */
}