path: root/drivers/char/synclink.c
author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/char/synclink.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/char/synclink.c')
-rw-r--r--   drivers/char/synclink.c   8214
1 file changed, 8214 insertions, 0 deletions
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
new file mode 100644
index 000000000000..37c8bea8e2b0
--- /dev/null
+++ b/drivers/char/synclink.c
@@ -0,0 +1,8214 @@
1/*
2 * linux/drivers/char/synclink.c
3 *
4 * $Id: synclink.c,v 4.28 2004/08/11 19:30:01 paulkf Exp $
5 *
6 * Device driver for Microgate SyncLink ISA and PCI
7 * high speed multiprotocol serial adapters.
8 *
9 * written by Paul Fulghum for Microgate Corporation
10 * paulkf@microgate.com
11 *
12 * Microgate and SyncLink are trademarks of Microgate Corporation
13 *
14 * Derived from serial.c written by Theodore Ts'o and Linus Torvalds
15 *
16 * Original release 01/11/99
17 *
18 * This code is released under the GNU General Public License (GPL)
19 *
20 * This driver is primarily intended for use in synchronous
21 * HDLC mode. Asynchronous mode is also provided.
22 *
23 * When operating in synchronous mode, each call to mgsl_write()
24 * contains exactly one complete HDLC frame. Calling mgsl_put_char
25 * will start assembling an HDLC frame that will not be sent until
26 * mgsl_flush_chars or mgsl_write is called.
27 *
28 * Synchronous receive data is reported as complete frames. To accomplish
29 * this, the TTY flip buffer is bypassed (it is too small to hold the largest
30 * frame and may fragment frames) and the line discipline
31 * receive entry point is called directly.
32 *
33 * This driver has been tested with a slightly modified ppp.c driver
34 * for synchronous PPP.
35 *
36 * 2000/02/16
37 * Added interface for syncppp.c driver (an alternate synchronous PPP
38 * implementation that also supports Cisco HDLC). Each device instance
39 * registers as a tty device AND a network device (if dosyncppp option
40 * is set for the device). The functionality is determined by which
41 * device interface is opened.
42 *
43 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
44 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
46 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
47 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
48 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
49 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
51 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
52 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
53 * OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
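/*
 * Illustrative user-space sketch (not part of this file): in
 * synchronous HDLC mode each write() carries exactly one complete
 * frame and each read() returns one received frame. The device node
 * name is an assumption; MGSL_PARAMS, MGSL_MODE_HDLC and the
 * MGSL_IOCGPARAMS/MGSL_IOCSPARAMS ioctls come from synclink.h.
 * frame_buf/frame_len are placeholders for the caller's frame.
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);		// assumed node name
 *	MGSL_PARAMS params;
 *
 *	ioctl(fd, MGSL_IOCGPARAMS, &params);		// fetch current settings
 *	params.mode = MGSL_MODE_HDLC;			// frame-oriented operation
 *	params.crc_type = HDLC_CRC_16_CCITT;
 *	ioctl(fd, MGSL_IOCSPARAMS, &params);		// apply settings
 *	write(fd, frame_buf, frame_len);		// one call == one HDLC frame
 */
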
56#if defined(__i386__)
57# define BREAKPOINT() asm(" int $3");
58#else
59# define BREAKPOINT() { }
60#endif
61
62#define MAX_ISA_DEVICES 10
63#define MAX_PCI_DEVICES 10
64#define MAX_TOTAL_DEVICES 20
65
66#include <linux/config.h>
67#include <linux/module.h>
68#include <linux/errno.h>
69#include <linux/signal.h>
70#include <linux/sched.h>
71#include <linux/timer.h>
72#include <linux/interrupt.h>
73#include <linux/pci.h>
74#include <linux/tty.h>
75#include <linux/tty_flip.h>
76#include <linux/serial.h>
77#include <linux/major.h>
78#include <linux/string.h>
79#include <linux/fcntl.h>
80#include <linux/ptrace.h>
81#include <linux/ioport.h>
82#include <linux/mm.h>
83#include <linux/slab.h>
84#include <linux/delay.h>
85
86#include <linux/netdevice.h>
87
88#include <linux/vmalloc.h>
89#include <linux/init.h>
90#include <asm/serial.h>
91
92#include <linux/delay.h>
93#include <linux/ioctl.h>
94
95#include <asm/system.h>
96#include <asm/io.h>
97#include <asm/irq.h>
98#include <asm/dma.h>
99#include <linux/bitops.h>
100#include <asm/types.h>
101#include <linux/termios.h>
102#include <linux/workqueue.h>
103#include <linux/hdlc.h>
104
105#ifdef CONFIG_HDLC_MODULE
106#define CONFIG_HDLC 1
107#endif
108
109#define GET_USER(error,value,addr) error = get_user(value,addr)
110#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
111#define PUT_USER(error,value,addr) error = put_user(value,addr)
112#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
113
114#include <asm/uaccess.h>
115
116#include "linux/synclink.h"
117
118#define RCLRVALUE 0xffff
119
120static MGSL_PARAMS default_params = {
121 MGSL_MODE_HDLC, /* unsigned long mode */
122 0, /* unsigned char loopback; */
123 HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
124 HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
125 0, /* unsigned long clock_speed; */
126 0xff, /* unsigned char addr_filter; */
127 HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
128 HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
129 HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
130 9600, /* unsigned long data_rate; */
131 8, /* unsigned char data_bits; */
132 1, /* unsigned char stop_bits; */
133 ASYNC_PARITY_NONE /* unsigned char parity; */
134};
135
136#define SHARED_MEM_ADDRESS_SIZE 0x40000
137#define BUFFERLISTSIZE (PAGE_SIZE)
138#define DMABUFFERSIZE (PAGE_SIZE)
139#define MAXRXFRAMES 7
140
141typedef struct _DMABUFFERENTRY
142{
143 u32 phys_addr; /* 32-bit flat physical address of data buffer */
144 u16 count; /* buffer size/data count */
145 u16 status; /* Control/status field */
146 u16 rcc; /* character count field */
147 u16 reserved; /* padding required by 16C32 */
148 u32 link; /* 32-bit flat link to next buffer entry */
149 char *virt_addr; /* virtual address of data buffer */
150 u32 phys_entry; /* physical address of this buffer entry */
151} DMABUFFERENTRY, *DMAPBUFFERENTRY;
152
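/*
 * Sketch of how these entries are chained (an illustration with
 * hypothetical list/list_phys/count variables, mirroring the
 * buffer-list setup done later in this file rather than quoting it):
 * each entry's 'link' holds the physical address of the next entry so
 * the 16C32 DMA controller can walk the list on its own, with the last
 * entry expected to link back to the first.
 *
 *	for (i = 0; i < count; i++) {
 *		list[i].phys_entry = list_phys + i * sizeof(DMABUFFERENTRY);
 *		list[i].link = list_phys +
 *			((i + 1) % count) * sizeof(DMABUFFERENTRY);
 *	}
 */
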
153/* The queue of BH actions to be performed */
154
155#define BH_RECEIVE 1
156#define BH_TRANSMIT 2
157#define BH_STATUS 4
158
159#define IO_PIN_SHUTDOWN_LIMIT 100
160
161#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
162
163struct _input_signal_events {
164 int ri_up;
165 int ri_down;
166 int dsr_up;
167 int dsr_down;
168 int dcd_up;
169 int dcd_down;
170 int cts_up;
171 int cts_down;
172};
173
174/* transmit holding buffer definitions*/
175#define MAX_TX_HOLDING_BUFFERS 5
176struct tx_holding_buffer {
177 int buffer_size;
178 unsigned char * buffer;
179};
180
181
182/*
183 * Device instance data structure
184 */
185
186struct mgsl_struct {
187 int magic;
188 int flags;
189 int count; /* count of opens */
190 int line;
191 int hw_version;
192 unsigned short close_delay;
193 unsigned short closing_wait; /* time to wait before closing */
194
195 struct mgsl_icount icount;
196
197 struct tty_struct *tty;
198 int timeout;
199 int x_char; /* xon/xoff character */
200 int blocked_open; /* # of blocked opens */
201 u16 read_status_mask;
202 u16 ignore_status_mask;
203 unsigned char *xmit_buf;
204 int xmit_head;
205 int xmit_tail;
206 int xmit_cnt;
207
208 wait_queue_head_t open_wait;
209 wait_queue_head_t close_wait;
210
211 wait_queue_head_t status_event_wait_q;
212 wait_queue_head_t event_wait_q;
213 struct timer_list tx_timer; /* HDLC transmit timeout timer */
214 struct mgsl_struct *next_device; /* device list link */
215
216 spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
217 struct work_struct task; /* task structure for scheduling bh */
218
219 u32 EventMask; /* event trigger mask */
220 u32 RecordedEvents; /* pending events */
221
222 u32 max_frame_size; /* as set by device config */
223
224 u32 pending_bh;
225
226 int bh_running; /* Protection from multiple */
227 int isr_overflow;
228 int bh_requested;
229
230 int dcd_chkcount; /* check counts to prevent */
231 int cts_chkcount; /* too many IRQs if a signal */
232 int dsr_chkcount; /* is floating */
233 int ri_chkcount;
234
235 char *buffer_list; /* virtual address of Rx & Tx buffer lists */
236 unsigned long buffer_list_phys;
237
238 unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
239 DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
240 unsigned int current_rx_buffer;
241
242 int num_tx_dma_buffers; /* number of tx dma frames required */
243 int tx_dma_buffers_used;
244 unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
245 DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
246 int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
247 int current_tx_buffer; /* next tx dma buffer to be loaded */
248
249 unsigned char *intermediate_rxbuffer;
250
251 int num_tx_holding_buffers; /* number of tx holding buffer allocated */
252 int get_tx_holding_index; /* next tx holding buffer for adapter to load */
253 int put_tx_holding_index; /* next tx holding buffer to store user request */
254 int tx_holding_count; /* number of tx holding buffers waiting */
255 struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
256
257 int rx_enabled;
258 int rx_overflow;
259 int rx_rcc_underrun;
260
261 int tx_enabled;
262 int tx_active;
263 u32 idle_mode;
264
265 u16 cmr_value;
266 u16 tcsr_value;
267
268 char device_name[25]; /* device instance name */
269
270 unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
271 unsigned char bus; /* expansion bus number (zero based) */
272 unsigned char function; /* PCI device number */
273
274 unsigned int io_base; /* base I/O address of adapter */
275 unsigned int io_addr_size; /* size of the I/O address range */
276 int io_addr_requested; /* nonzero if I/O address requested */
277
278 unsigned int irq_level; /* interrupt level */
279 unsigned long irq_flags;
280 int irq_requested; /* nonzero if IRQ requested */
281
282 unsigned int dma_level; /* DMA channel */
283 int dma_requested; /* nonzero if dma channel requested */
284
285 u16 mbre_bit;
286 u16 loopback_bits;
287 u16 usc_idle_mode;
288
289 MGSL_PARAMS params; /* communications parameters */
290
291 unsigned char serial_signals; /* current serial signal states */
292
293 int irq_occurred; /* for diagnostics use */
294 unsigned int init_error; /* Initialization startup error (DIAGS) */
295 int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
296
297 u32 last_mem_alloc;
298 unsigned char* memory_base; /* shared memory address (PCI only) */
299 u32 phys_memory_base;
300 int shared_mem_requested;
301
302 unsigned char* lcr_base; /* local config registers (PCI only) */
303 u32 phys_lcr_base;
304 u32 lcr_offset;
305 int lcr_mem_requested;
306
307 u32 misc_ctrl_value;
308 char flag_buf[MAX_ASYNC_BUFFER_SIZE];
309 char char_buf[MAX_ASYNC_BUFFER_SIZE];
310 BOOLEAN drop_rts_on_tx_done;
311
312 BOOLEAN loopmode_insert_requested;
313 BOOLEAN loopmode_send_done_requested;
314
315 struct _input_signal_events input_signal_events;
316
317 /* generic HDLC device parts */
318 int netcount;
319 int dosyncppp;
320 spinlock_t netlock;
321
322#ifdef CONFIG_HDLC
323 struct net_device *netdev;
324#endif
325};
326
327#define MGSL_MAGIC 0x5401
328
329/*
330 * The size of the serial xmit buffer is 1 page, or 4096 bytes
331 */
332#ifndef SERIAL_XMIT_SIZE
333#define SERIAL_XMIT_SIZE 4096
334#endif
335
336/*
337 * These macros define the offsets used in calculating the
338 * I/O address of the specified USC registers.
339 */
340
341
342#define DCPIN 2 /* Bit 1 of I/O address */
343#define SDPIN 4 /* Bit 2 of I/O address */
344
345#define DCAR 0 /* DMA command/address register */
346#define CCAR SDPIN /* channel command/address register */
347#define DATAREG DCPIN + SDPIN /* serial data register */
348#define MSBONLY 0x41
349#define LSBONLY 0x40
350
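/*
 * Worked example (illustrative): from the definitions above, the
 * channel command/address register is reached at io_base + CCAR
 * (offset 4, bit 2 of the I/O address set) and the serial data
 * register at io_base + DATAREG (offset 6, bits 1 and 2 set).
 */
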
351/*
352 * These macros define the register address (ordinal number)
353 * used for writing address/value pairs to the USC.
354 */
355
356#define CMR 0x02 /* Channel mode Register */
357#define CCSR 0x04 /* Channel Command/status Register */
358#define CCR 0x06 /* Channel Control Register */
359#define PSR 0x08 /* Port status Register */
360#define PCR 0x0a /* Port Control Register */
361#define TMDR 0x0c /* Test mode Data Register */
362#define TMCR 0x0e /* Test mode Control Register */
363#define CMCR 0x10 /* Clock mode Control Register */
364#define HCR 0x12 /* Hardware Configuration Register */
365#define IVR 0x14 /* Interrupt Vector Register */
366#define IOCR 0x16 /* Input/Output Control Register */
367#define ICR 0x18 /* Interrupt Control Register */
368#define DCCR 0x1a /* Daisy Chain Control Register */
369#define MISR 0x1c /* Misc Interrupt status Register */
370#define SICR 0x1e /* status Interrupt Control Register */
371#define RDR 0x20 /* Receive Data Register */
372#define RMR 0x22 /* Receive mode Register */
373#define RCSR 0x24 /* Receive Command/status Register */
374#define RICR 0x26 /* Receive Interrupt Control Register */
375#define RSR 0x28 /* Receive Sync Register */
376#define RCLR 0x2a /* Receive count Limit Register */
377#define RCCR 0x2c /* Receive Character count Register */
378#define TC0R 0x2e /* Time Constant 0 Register */
379#define TDR 0x30 /* Transmit Data Register */
380#define TMR 0x32 /* Transmit mode Register */
381#define TCSR 0x34 /* Transmit Command/status Register */
382#define TICR 0x36 /* Transmit Interrupt Control Register */
383#define TSR 0x38 /* Transmit Sync Register */
384#define TCLR 0x3a /* Transmit count Limit Register */
385#define TCCR 0x3c /* Transmit Character count Register */
386#define TC1R 0x3e /* Time Constant 1 Register */
387
388
389/*
390 * MACRO DEFINITIONS FOR DMA REGISTERS
391 */
392
393#define DCR 0x06 /* DMA Control Register (shared) */
394#define DACR 0x08 /* DMA Array count Register (shared) */
395#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
396#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
397#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
398#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
399#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
400
401#define TDMR 0x02 /* Transmit DMA mode Register */
402#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
403#define TBCR 0x2a /* Transmit Byte count Register */
404#define TARL 0x2c /* Transmit Address Register (low) */
405#define TARU 0x2e /* Transmit Address Register (high) */
406#define NTBCR 0x3a /* Next Transmit Byte count Register */
407#define NTARL 0x3c /* Next Transmit Address Register (low) */
408#define NTARU 0x3e /* Next Transmit Address Register (high) */
409
410#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
411#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
412#define RBCR 0xaa /* Receive Byte count Register */
413#define RARL 0xac /* Receive Address Register (low) */
414#define RARU 0xae /* Receive Address Register (high) */
415#define NRBCR 0xba /* Next Receive Byte count Register */
416#define NRARL 0xbc /* Next Receive Address Register (low) */
417#define NRARU 0xbe /* Next Receive Address Register (high) */
418
419
420/*
421 * MACRO DEFINITIONS FOR MODEM STATUS BITS
422 */
423
424#define MODEMSTATUS_DTR 0x80
425#define MODEMSTATUS_DSR 0x40
426#define MODEMSTATUS_RTS 0x20
427#define MODEMSTATUS_CTS 0x10
428#define MODEMSTATUS_RI 0x04
429#define MODEMSTATUS_DCD 0x01
430
431
432/*
433 * Channel Command/Address Register (CCAR) Command Codes
434 */
435
436#define RTCmd_Null 0x0000
437#define RTCmd_ResetHighestIus 0x1000
438#define RTCmd_TriggerChannelLoadDma 0x2000
439#define RTCmd_TriggerRxDma 0x2800
440#define RTCmd_TriggerTxDma 0x3000
441#define RTCmd_TriggerRxAndTxDma 0x3800
442#define RTCmd_PurgeRxFifo 0x4800
443#define RTCmd_PurgeTxFifo 0x5000
444#define RTCmd_PurgeRxAndTxFifo 0x5800
445#define RTCmd_LoadRcc 0x6800
446#define RTCmd_LoadTcc 0x7000
447#define RTCmd_LoadRccAndTcc 0x7800
448#define RTCmd_LoadTC0 0x8800
449#define RTCmd_LoadTC1 0x9000
450#define RTCmd_LoadTC0AndTC1 0x9800
451#define RTCmd_SerialDataLSBFirst 0xa000
452#define RTCmd_SerialDataMSBFirst 0xa800
453#define RTCmd_SelectBigEndian 0xb000
454#define RTCmd_SelectLittleEndian 0xb800
455
456
457/*
458 * DMA Command/Address Register (DCAR) Command Codes
459 */
460
461#define DmaCmd_Null 0x0000
462#define DmaCmd_ResetTxChannel 0x1000
463#define DmaCmd_ResetRxChannel 0x1200
464#define DmaCmd_StartTxChannel 0x2000
465#define DmaCmd_StartRxChannel 0x2200
466#define DmaCmd_ContinueTxChannel 0x3000
467#define DmaCmd_ContinueRxChannel 0x3200
468#define DmaCmd_PauseTxChannel 0x4000
469#define DmaCmd_PauseRxChannel 0x4200
470#define DmaCmd_AbortTxChannel 0x5000
471#define DmaCmd_AbortRxChannel 0x5200
472#define DmaCmd_InitTxChannel 0x7000
473#define DmaCmd_InitRxChannel 0x7200
474#define DmaCmd_ResetHighestDmaIus 0x8000
475#define DmaCmd_ResetAllChannels 0x9000
476#define DmaCmd_StartAllChannels 0xa000
477#define DmaCmd_ContinueAllChannels 0xb000
478#define DmaCmd_PauseAllChannels 0xc000
479#define DmaCmd_AbortAllChannels 0xd000
480#define DmaCmd_InitAllChannels 0xf000
481
482#define TCmd_Null 0x0000
483#define TCmd_ClearTxCRC 0x2000
484#define TCmd_SelectTicrTtsaData 0x4000
485#define TCmd_SelectTicrTxFifostatus 0x5000
486#define TCmd_SelectTicrIntLevel 0x6000
487#define TCmd_SelectTicrdma_level 0x7000
488#define TCmd_SendFrame 0x8000
489#define TCmd_SendAbort 0x9000
490#define TCmd_EnableDleInsertion 0xc000
491#define TCmd_DisableDleInsertion 0xd000
492#define TCmd_ClearEofEom 0xe000
493#define TCmd_SetEofEom 0xf000
494
495#define RCmd_Null 0x0000
496#define RCmd_ClearRxCRC 0x2000
497#define RCmd_EnterHuntmode 0x3000
498#define RCmd_SelectRicrRtsaData 0x4000
499#define RCmd_SelectRicrRxFifostatus 0x5000
500#define RCmd_SelectRicrIntLevel 0x6000
501#define RCmd_SelectRicrdma_level 0x7000
502
503/*
504 * Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
505 */
506
507#define RECEIVE_STATUS BIT5
508#define RECEIVE_DATA BIT4
509#define TRANSMIT_STATUS BIT3
510#define TRANSMIT_DATA BIT2
511#define IO_PIN BIT1
512#define MISC BIT0
513
514
515/*
516 * Receive status Bits in Receive Command/status Register RCSR
517 */
518
519#define RXSTATUS_SHORT_FRAME BIT8
520#define RXSTATUS_CODE_VIOLATION BIT8
521#define RXSTATUS_EXITED_HUNT BIT7
522#define RXSTATUS_IDLE_RECEIVED BIT6
523#define RXSTATUS_BREAK_RECEIVED BIT5
524#define RXSTATUS_ABORT_RECEIVED BIT5
525#define RXSTATUS_RXBOUND BIT4
526#define RXSTATUS_CRC_ERROR BIT3
527#define RXSTATUS_FRAMING_ERROR BIT3
528#define RXSTATUS_ABORT BIT2
529#define RXSTATUS_PARITY_ERROR BIT2
530#define RXSTATUS_OVERRUN BIT1
531#define RXSTATUS_DATA_AVAILABLE BIT0
532#define RXSTATUS_ALL 0x01f6
533#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
534
535/*
536 * Values for setting transmit idle mode in
537 * Transmit Control/status Register (TCSR)
538 */
539#define IDLEMODE_FLAGS 0x0000
540#define IDLEMODE_ALT_ONE_ZERO 0x0100
541#define IDLEMODE_ZERO 0x0200
542#define IDLEMODE_ONE 0x0300
543#define IDLEMODE_ALT_MARK_SPACE 0x0500
544#define IDLEMODE_SPACE 0x0600
545#define IDLEMODE_MARK 0x0700
546#define IDLEMODE_MASK 0x0700
547
548/*
549 * IUSC revision identifiers
550 */
551#define IUSC_SL1660 0x4d44
552#define IUSC_PRE_SL1660 0x4553
553
554/*
555 * Transmit status Bits in Transmit Command/status Register (TCSR)
556 */
557
558#define TCSR_PRESERVE 0x0F00
559
560#define TCSR_UNDERWAIT BIT11
561#define TXSTATUS_PREAMBLE_SENT BIT7
562#define TXSTATUS_IDLE_SENT BIT6
563#define TXSTATUS_ABORT_SENT BIT5
564#define TXSTATUS_EOF_SENT BIT4
565#define TXSTATUS_EOM_SENT BIT4
566#define TXSTATUS_CRC_SENT BIT3
567#define TXSTATUS_ALL_SENT BIT2
568#define TXSTATUS_UNDERRUN BIT1
569#define TXSTATUS_FIFO_EMPTY BIT0
570#define TXSTATUS_ALL 0x00fa
571#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
572
573
574#define MISCSTATUS_RXC_LATCHED BIT15
575#define MISCSTATUS_RXC BIT14
576#define MISCSTATUS_TXC_LATCHED BIT13
577#define MISCSTATUS_TXC BIT12
578#define MISCSTATUS_RI_LATCHED BIT11
579#define MISCSTATUS_RI BIT10
580#define MISCSTATUS_DSR_LATCHED BIT9
581#define MISCSTATUS_DSR BIT8
582#define MISCSTATUS_DCD_LATCHED BIT7
583#define MISCSTATUS_DCD BIT6
584#define MISCSTATUS_CTS_LATCHED BIT5
585#define MISCSTATUS_CTS BIT4
586#define MISCSTATUS_RCC_UNDERRUN BIT3
587#define MISCSTATUS_DPLL_NO_SYNC BIT2
588#define MISCSTATUS_BRG1_ZERO BIT1
589#define MISCSTATUS_BRG0_ZERO BIT0
590
591#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
592#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
593
594#define SICR_RXC_ACTIVE BIT15
595#define SICR_RXC_INACTIVE BIT14
596#define SICR_RXC (BIT15+BIT14)
597#define SICR_TXC_ACTIVE BIT13
598#define SICR_TXC_INACTIVE BIT12
599#define SICR_TXC (BIT13+BIT12)
600#define SICR_RI_ACTIVE BIT11
601#define SICR_RI_INACTIVE BIT10
602#define SICR_RI (BIT11+BIT10)
603#define SICR_DSR_ACTIVE BIT9
604#define SICR_DSR_INACTIVE BIT8
605#define SICR_DSR (BIT9+BIT8)
606#define SICR_DCD_ACTIVE BIT7
607#define SICR_DCD_INACTIVE BIT6
608#define SICR_DCD (BIT7+BIT6)
609#define SICR_CTS_ACTIVE BIT5
610#define SICR_CTS_INACTIVE BIT4
611#define SICR_CTS (BIT5+BIT4)
612#define SICR_RCC_UNDERFLOW BIT3
613#define SICR_DPLL_NO_SYNC BIT2
614#define SICR_BRG1_ZERO BIT1
615#define SICR_BRG0_ZERO BIT0
616
617void usc_DisableMasterIrqBit( struct mgsl_struct *info );
618void usc_EnableMasterIrqBit( struct mgsl_struct *info );
619void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
620void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
621void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
622
623#define usc_EnableInterrupts( a, b ) \
624 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
625
626#define usc_DisableInterrupts( a, b ) \
627 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
628
629#define usc_EnableMasterIrqBit(a) \
630 usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
631
632#define usc_DisableMasterIrqBit(a) \
633 usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
634
635#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
636
637/*
638 * Transmit status Bits in Transmit Control status Register (TCSR)
639 * and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
640 */
641
642#define TXSTATUS_PREAMBLE_SENT BIT7
643#define TXSTATUS_IDLE_SENT BIT6
644#define TXSTATUS_ABORT_SENT BIT5
645#define TXSTATUS_EOF BIT4
646#define TXSTATUS_CRC_SENT BIT3
647#define TXSTATUS_ALL_SENT BIT2
648#define TXSTATUS_UNDERRUN BIT1
649#define TXSTATUS_FIFO_EMPTY BIT0
650
651#define DICR_MASTER BIT15
652#define DICR_TRANSMIT BIT0
653#define DICR_RECEIVE BIT1
654
655#define usc_EnableDmaInterrupts(a,b) \
656 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
657
658#define usc_DisableDmaInterrupts(a,b) \
659 usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
660
661#define usc_EnableStatusIrqs(a,b) \
662 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
663
664#define usc_DisablestatusIrqs(a,b) \
665 usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
666
667/* Transmit status Bits in Transmit Control status Register (TCSR) */
668/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
669
670
671#define DISABLE_UNCONDITIONAL 0
672#define DISABLE_END_OF_FRAME 1
673#define ENABLE_UNCONDITIONAL 2
674#define ENABLE_AUTO_CTS 3
675#define ENABLE_AUTO_DCD 3
676#define usc_EnableTransmitter(a,b) \
677 usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
678#define usc_EnableReceiver(a,b) \
679 usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
680
681static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
682static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
683static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
684
685static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
686static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
687static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
688void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
689void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
690
691#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
692#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
693
694#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
695
696static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
697static void usc_start_receiver( struct mgsl_struct *info );
698static void usc_stop_receiver( struct mgsl_struct *info );
699
700static void usc_start_transmitter( struct mgsl_struct *info );
701static void usc_stop_transmitter( struct mgsl_struct *info );
702static void usc_set_txidle( struct mgsl_struct *info );
703static void usc_load_txfifo( struct mgsl_struct *info );
704
705static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
706static void usc_enable_loopback( struct mgsl_struct *info, int enable );
707
708static void usc_get_serial_signals( struct mgsl_struct *info );
709static void usc_set_serial_signals( struct mgsl_struct *info );
710
711static void usc_reset( struct mgsl_struct *info );
712
713static void usc_set_sync_mode( struct mgsl_struct *info );
714static void usc_set_sdlc_mode( struct mgsl_struct *info );
715static void usc_set_async_mode( struct mgsl_struct *info );
716static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
717
718static void usc_loopback_frame( struct mgsl_struct *info );
719
720static void mgsl_tx_timeout(unsigned long context);
721
722
723static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
724static void usc_loopmode_insert_request( struct mgsl_struct * info );
725static int usc_loopmode_active( struct mgsl_struct * info);
726static void usc_loopmode_send_done( struct mgsl_struct * info );
727
728static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
729
730#ifdef CONFIG_HDLC
731#define dev_to_port(D) (dev_to_hdlc(D)->priv)
732static void hdlcdev_tx_done(struct mgsl_struct *info);
733static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
734static int hdlcdev_init(struct mgsl_struct *info);
735static void hdlcdev_exit(struct mgsl_struct *info);
736#endif
737
738/*
739 * Defines a BUS descriptor value for the PCI adapter
740 * local bus address ranges.
741 */
742
743#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
744(0x00400020 + \
745((WrHold) << 30) + \
746((WrDly) << 28) + \
747((RdDly) << 26) + \
748((Nwdd) << 20) + \
749((Nwad) << 15) + \
750((Nxda) << 13) + \
751((Nrdd) << 11) + \
752((Nrad) << 6) )
753
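/*
 * Worked example (illustrative only): with every field zero the macro
 * reduces to its base value 0x00400020; setting just WrHold to 1 adds
 * (1 << 30), giving 0x40400020.
 */
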
754static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
755
756/*
757 * Adapter diagnostic routines
758 */
759static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
760static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
761static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
762static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
763static int mgsl_adapter_test( struct mgsl_struct *info );
764
765/*
766 * device and resource management routines
767 */
768static int mgsl_claim_resources(struct mgsl_struct *info);
769static void mgsl_release_resources(struct mgsl_struct *info);
770static void mgsl_add_device(struct mgsl_struct *info);
771static struct mgsl_struct* mgsl_allocate_device(void);
772
773/*
774 * DMA buffer manipulation functions.
775 */
776static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
777static int mgsl_get_rx_frame( struct mgsl_struct *info );
778static int mgsl_get_raw_rx_frame( struct mgsl_struct *info );
779static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
780static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
781static int num_free_tx_dma_buffers(struct mgsl_struct *info);
782static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
783static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
784
785/*
786 * DMA and Shared Memory buffer allocation and formatting
787 */
788static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
789static void mgsl_free_dma_buffers(struct mgsl_struct *info);
790static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
791static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
792static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
793static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
794static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
795static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
796static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
797static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
798static int load_next_tx_holding_buffer(struct mgsl_struct *info);
799static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
800
801/*
802 * Bottom half interrupt handlers
803 */
804static void mgsl_bh_handler(void* Context);
805static void mgsl_bh_receive(struct mgsl_struct *info);
806static void mgsl_bh_transmit(struct mgsl_struct *info);
807static void mgsl_bh_status(struct mgsl_struct *info);
808
809/*
810 * Interrupt handler routines and dispatch table.
811 */
812static void mgsl_isr_null( struct mgsl_struct *info );
813static void mgsl_isr_transmit_data( struct mgsl_struct *info );
814static void mgsl_isr_receive_data( struct mgsl_struct *info );
815static void mgsl_isr_receive_status( struct mgsl_struct *info );
816static void mgsl_isr_transmit_status( struct mgsl_struct *info );
817static void mgsl_isr_io_pin( struct mgsl_struct *info );
818static void mgsl_isr_misc( struct mgsl_struct *info );
819static void mgsl_isr_receive_dma( struct mgsl_struct *info );
820static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
821
822typedef void (*isr_dispatch_func)(struct mgsl_struct *);
823
824static isr_dispatch_func UscIsrTable[7] =
825{
826 mgsl_isr_null,
827 mgsl_isr_misc,
828 mgsl_isr_io_pin,
829 mgsl_isr_transmit_data,
830 mgsl_isr_transmit_status,
831 mgsl_isr_receive_data,
832 mgsl_isr_receive_status
833};
834
835/*
836 * ioctl call handlers
837 */
838static int tiocmget(struct tty_struct *tty, struct file *file);
839static int tiocmset(struct tty_struct *tty, struct file *file,
840 unsigned int set, unsigned int clear);
841static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
842 __user *user_icount);
843static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
844static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
845static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
846static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
847static int mgsl_txenable(struct mgsl_struct * info, int enable);
848static int mgsl_txabort(struct mgsl_struct * info);
849static int mgsl_rxenable(struct mgsl_struct * info, int enable);
850static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
851static int mgsl_loopmode_send_done( struct mgsl_struct * info );
852
853/* set non-zero on successful registration with PCI subsystem */
854static int pci_registered;
855
856/*
857 * Global linked list of SyncLink devices
858 */
859static struct mgsl_struct *mgsl_device_list;
860static int mgsl_device_count;
861
862/*
863 * Set this param to non-zero to load eax with the
864 * .text section address and breakpoint on module load.
865 * This is useful with gdb and the add-symbol-file command.
866 */
867static int break_on_load;
868
869/*
870 * Driver major number, defaults to zero to get auto
871 * assigned major number. May be forced as module parameter.
872 */
873static int ttymajor;
874
875/*
876 * Array of user specified options for ISA adapters.
877 */
878static int io[MAX_ISA_DEVICES];
879static int irq[MAX_ISA_DEVICES];
880static int dma[MAX_ISA_DEVICES];
881static int debug_level;
882static int maxframe[MAX_TOTAL_DEVICES];
883static int dosyncppp[MAX_TOTAL_DEVICES];
884static int txdmabufs[MAX_TOTAL_DEVICES];
885static int txholdbufs[MAX_TOTAL_DEVICES];
886
887module_param(break_on_load, bool, 0);
888module_param(ttymajor, int, 0);
889module_param_array(io, int, NULL, 0);
890module_param_array(irq, int, NULL, 0);
891module_param_array(dma, int, NULL, 0);
892module_param(debug_level, int, 0);
893module_param_array(maxframe, int, NULL, 0);
894module_param_array(dosyncppp, int, NULL, 0);
895module_param_array(txdmabufs, int, NULL, 0);
896module_param_array(txholdbufs, int, NULL, 0);
897
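/*
 * Example module load (illustrative; the module name and the values
 * are assumptions, the option names are the parameters declared
 * above):
 *
 *	modprobe synclink io=0x280 irq=10 dma=7 maxframe=4096 debug_level=1
 */
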
898static char *driver_name = "SyncLink serial driver";
899static char *driver_version = "$Revision: 4.28 $";
900
901static int synclink_init_one (struct pci_dev *dev,
902 const struct pci_device_id *ent);
903static void synclink_remove_one (struct pci_dev *dev);
904
905static struct pci_device_id synclink_pci_tbl[] = {
906 { PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
907 { PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
908 { 0, }, /* terminate list */
909};
910MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
911
912MODULE_LICENSE("GPL");
913
914static struct pci_driver synclink_pci_driver = {
915 .name = "synclink",
916 .id_table = synclink_pci_tbl,
917 .probe = synclink_init_one,
918 .remove = __devexit_p(synclink_remove_one),
919};
920
921static struct tty_driver *serial_driver;
922
923/* number of characters left in xmit buffer before we ask for more */
924#define WAKEUP_CHARS 256
925
926
927static void mgsl_change_params(struct mgsl_struct *info);
928static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
929
930/*
931 * 1st function defined in .text section. Calling this function in
932 * init_module() followed by a breakpoint allows a remote debugger
933 * (gdb) to get the .text address for the add-symbol-file command.
934 * This allows remote debugging of dynamically loadable modules.
935 */
936static void* mgsl_get_text_ptr(void)
937{
938 return mgsl_get_text_ptr;
939}
940
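/*
 * Illustrative gdb session (a sketch, not from this file): with
 * break_on_load set, the breakpoint fires with the .text address in
 * eax, which is then fed to add-symbol-file (the address shown is
 * hypothetical):
 *
 *	(gdb) p/x $eax
 *	$1 = 0xd08a6000
 *	(gdb) add-symbol-file synclink.ko 0xd08a6000
 */
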
941/*
942 * tmp_buf is used as a temporary buffer by mgsl_write. We need to
943 * lock it in case the COPY_FROM_USER blocks while swapping in a page,
944 * and some other program tries to do a serial write at the same time.
945 * Since the lock will only come under contention when the system is
946 * swapping and available memory is low, it makes sense to share one
947 * buffer across all the serial ioports, since it significantly saves
948 * memory if large numbers of serial ports are open.
949 */
950static unsigned char *tmp_buf;
951static DECLARE_MUTEX(tmp_buf_sem);
952
953static inline int mgsl_paranoia_check(struct mgsl_struct *info,
954 char *name, const char *routine)
955{
956#ifdef MGSL_PARANOIA_CHECK
957 static const char *badmagic =
958 "Warning: bad magic number for mgsl struct (%s) in %s\n";
959 static const char *badinfo =
960 "Warning: null mgsl_struct for (%s) in %s\n";
961
962 if (!info) {
963 printk(badinfo, name, routine);
964 return 1;
965 }
966 if (info->magic != MGSL_MAGIC) {
967 printk(badmagic, name, routine);
968 return 1;
969 }
970#else
971 if (!info)
972 return 1;
973#endif
974 return 0;
975}
976
977/**
978 * line discipline callback wrappers
979 *
980 * The wrappers maintain line discipline references
981 * while calling into the line discipline.
982 *
983 * ldisc_receive_buf - pass receive data to line discipline
984 */
985
986static void ldisc_receive_buf(struct tty_struct *tty,
987 const __u8 *data, char *flags, int count)
988{
989 struct tty_ldisc *ld;
990 if (!tty)
991 return;
992 ld = tty_ldisc_ref(tty);
993 if (ld) {
994 if (ld->receive_buf)
995 ld->receive_buf(tty, data, flags, count);
996 tty_ldisc_deref(ld);
997 }
998}
999
1000/* mgsl_stop() throttle (stop) transmitter
1001 *
1002 * Arguments: tty pointer to tty info structure
1003 * Return Value: None
1004 */
1005static void mgsl_stop(struct tty_struct *tty)
1006{
1007 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1008 unsigned long flags;
1009
1010 if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
1011 return;
1012
1013 if ( debug_level >= DEBUG_LEVEL_INFO )
1014 printk("mgsl_stop(%s)\n",info->device_name);
1015
1016 spin_lock_irqsave(&info->irq_spinlock,flags);
1017 if (info->tx_enabled)
1018 usc_stop_transmitter(info);
1019 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1020
1021} /* end of mgsl_stop() */
1022
1023/* mgsl_start() release (start) transmitter
1024 *
1025 * Arguments: tty pointer to tty info structure
1026 * Return Value: None
1027 */
1028static void mgsl_start(struct tty_struct *tty)
1029{
1030 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
1031 unsigned long flags;
1032
1033 if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
1034 return;
1035
1036 if ( debug_level >= DEBUG_LEVEL_INFO )
1037 printk("mgsl_start(%s)\n",info->device_name);
1038
1039 spin_lock_irqsave(&info->irq_spinlock,flags);
1040 if (!info->tx_enabled)
1041 usc_start_transmitter(info);
1042 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1043
1044} /* end of mgsl_start() */
1045
1046/*
1047 * Bottom half work queue access functions
1048 */
1049
1050/* mgsl_bh_action() Return next bottom half action to perform.
1051 * Return Value: BH action code or 0 if nothing to do.
1052 */
1053static int mgsl_bh_action(struct mgsl_struct *info)
1054{
1055 unsigned long flags;
1056 int rc = 0;
1057
1058 spin_lock_irqsave(&info->irq_spinlock,flags);
1059
1060 if (info->pending_bh & BH_RECEIVE) {
1061 info->pending_bh &= ~BH_RECEIVE;
1062 rc = BH_RECEIVE;
1063 } else if (info->pending_bh & BH_TRANSMIT) {
1064 info->pending_bh &= ~BH_TRANSMIT;
1065 rc = BH_TRANSMIT;
1066 } else if (info->pending_bh & BH_STATUS) {
1067 info->pending_bh &= ~BH_STATUS;
1068 rc = BH_STATUS;
1069 }
1070
1071 if (!rc) {
1072 /* Mark BH routine as complete */
1073 info->bh_running = 0;
1074 info->bh_requested = 0;
1075 }
1076
1077 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1078
1079 return rc;
1080}
1081
1082/*
1083 * Perform bottom half processing of work items queued by ISR.
1084 */
1085static void mgsl_bh_handler(void* Context)
1086{
1087 struct mgsl_struct *info = (struct mgsl_struct*)Context;
1088 int action;
1089
1090 if (!info)
1091 return;
1092
1093 if ( debug_level >= DEBUG_LEVEL_BH )
1094 printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
1095 __FILE__,__LINE__,info->device_name);
1096
1097 info->bh_running = 1;
1098
1099 while((action = mgsl_bh_action(info)) != 0) {
1100
1101 /* Process work item */
1102 if ( debug_level >= DEBUG_LEVEL_BH )
1103 printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
1104 __FILE__,__LINE__,action);
1105
1106 switch (action) {
1107
1108 case BH_RECEIVE:
1109 mgsl_bh_receive(info);
1110 break;
1111 case BH_TRANSMIT:
1112 mgsl_bh_transmit(info);
1113 break;
1114 case BH_STATUS:
1115 mgsl_bh_status(info);
1116 break;
1117 default:
1118 /* unknown work item ID */
1119 printk("Unknown work item ID=%08X!\n", action);
1120 break;
1121 }
1122 }
1123
1124 if ( debug_level >= DEBUG_LEVEL_BH )
1125 printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
1126 __FILE__,__LINE__,info->device_name);
1127}
1128
1129static void mgsl_bh_receive(struct mgsl_struct *info)
1130{
1131 int (*get_rx_frame)(struct mgsl_struct *info) =
1132 (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
1133
1134 if ( debug_level >= DEBUG_LEVEL_BH )
1135 printk( "%s(%d):mgsl_bh_receive(%s)\n",
1136 __FILE__,__LINE__,info->device_name);
1137
1138 do
1139 {
1140 if (info->rx_rcc_underrun) {
1141 unsigned long flags;
1142 spin_lock_irqsave(&info->irq_spinlock,flags);
1143 usc_start_receiver(info);
1144 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1145 return;
1146 }
1147 } while(get_rx_frame(info));
1148}
1149
1150static void mgsl_bh_transmit(struct mgsl_struct *info)
1151{
1152 struct tty_struct *tty = info->tty;
1153 unsigned long flags;
1154
1155 if ( debug_level >= DEBUG_LEVEL_BH )
1156 printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
1157 __FILE__,__LINE__,info->device_name);
1158
1159 if (tty) {
1160 tty_wakeup(tty);
1161 wake_up_interruptible(&tty->write_wait);
1162 }
1163
1164 /* if transmitter idle and loopmode_send_done_requested
1165 * then start echoing RxD to TxD
1166 */
1167 spin_lock_irqsave(&info->irq_spinlock,flags);
1168 if ( !info->tx_active && info->loopmode_send_done_requested )
1169 usc_loopmode_send_done( info );
1170 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1171}
1172
1173static void mgsl_bh_status(struct mgsl_struct *info)
1174{
1175 if ( debug_level >= DEBUG_LEVEL_BH )
1176 printk( "%s(%d):mgsl_bh_status() entry on %s\n",
1177 __FILE__,__LINE__,info->device_name);
1178
1179 info->ri_chkcount = 0;
1180 info->dsr_chkcount = 0;
1181 info->dcd_chkcount = 0;
1182 info->cts_chkcount = 0;
1183}
1184
1185/* mgsl_isr_receive_status()
1186 *
1187 * Service a receive status interrupt. The type of status
1188 * interrupt is indicated by the state of the RCSR.
1189 * This is only used for HDLC mode.
1190 *
1191 * Arguments: info pointer to device instance data
1192 * Return Value: None
1193 */
1194static void mgsl_isr_receive_status( struct mgsl_struct *info )
1195{
1196 u16 status = usc_InReg( info, RCSR );
1197
1198 if ( debug_level >= DEBUG_LEVEL_ISR )
1199 printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
1200 __FILE__,__LINE__,status);
1201
1202 if ( (status & RXSTATUS_ABORT_RECEIVED) &&
1203 info->loopmode_insert_requested &&
1204 usc_loopmode_active(info) )
1205 {
1206 ++info->icount.rxabort;
1207 info->loopmode_insert_requested = FALSE;
1208
1209 /* clear CMR:13 to start echoing RxD to TxD */
1210 info->cmr_value &= ~BIT13;
1211 usc_OutReg(info, CMR, info->cmr_value);
1212
1213 /* disable received abort irq (no longer required) */
1214 usc_OutReg(info, RICR,
1215 (usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
1216 }
1217
1218 if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
1219 if (status & RXSTATUS_EXITED_HUNT)
1220 info->icount.exithunt++;
1221 if (status & RXSTATUS_IDLE_RECEIVED)
1222 info->icount.rxidle++;
1223 wake_up_interruptible(&info->event_wait_q);
1224 }
1225
1226 if (status & RXSTATUS_OVERRUN){
1227 info->icount.rxover++;
1228 usc_process_rxoverrun_sync( info );
1229 }
1230
1231 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
1232 usc_UnlatchRxstatusBits( info, status );
1233
1234} /* end of mgsl_isr_receive_status() */
1235
1236/* mgsl_isr_transmit_status()
1237 *
1238 * Service a transmit status interrupt.
1239 * HDLC mode: end of transmit frame
1240 * Async mode: all data is sent
1241 * Transmit status is indicated by bits in the TCSR.
1242 *
1243 * Arguments: info pointer to device instance data
1244 * Return Value: None
1245 */
1246static void mgsl_isr_transmit_status( struct mgsl_struct *info )
1247{
1248 u16 status = usc_InReg( info, TCSR );
1249
1250 if ( debug_level >= DEBUG_LEVEL_ISR )
1251 printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
1252 __FILE__,__LINE__,status);
1253
1254 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
1255 usc_UnlatchTxstatusBits( info, status );
1256
1257 if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
1258 {
1259 /* finished sending HDLC abort. This may leave */
1260 /* the TxFifo with data from the aborted frame */
1261 /* so purge the TxFifo. Also shutdown the DMA */
1262 /* channel in case there is data remaining in */
1263 /* the DMA buffer */
1264 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
1265 usc_RTCmd( info, RTCmd_PurgeTxFifo );
1266 }
1267
1268 if ( status & TXSTATUS_EOF_SENT )
1269 info->icount.txok++;
1270 else if ( status & TXSTATUS_UNDERRUN )
1271 info->icount.txunder++;
1272 else if ( status & TXSTATUS_ABORT_SENT )
1273 info->icount.txabort++;
1274 else
1275 info->icount.txunder++;
1276
1277 info->tx_active = 0;
1278 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1279 del_timer(&info->tx_timer);
1280
1281 if ( info->drop_rts_on_tx_done ) {
1282 usc_get_serial_signals( info );
1283 if ( info->serial_signals & SerialSignal_RTS ) {
1284 info->serial_signals &= ~SerialSignal_RTS;
1285 usc_set_serial_signals( info );
1286 }
1287 info->drop_rts_on_tx_done = 0;
1288 }
1289
1290#ifdef CONFIG_HDLC
1291 if (info->netcount)
1292 hdlcdev_tx_done(info);
1293 else
1294#endif
1295 {
1296 if (info->tty->stopped || info->tty->hw_stopped) {
1297 usc_stop_transmitter(info);
1298 return;
1299 }
1300 info->pending_bh |= BH_TRANSMIT;
1301 }
1302
1303} /* end of mgsl_isr_transmit_status() */
1304
1305/* mgsl_isr_io_pin()
1306 *
1307 * Service an Input/Output pin interrupt. The type of
1308 * interrupt is indicated by bits in the MISR
1309 *
1310 * Arguments: info pointer to device instance data
1311 * Return Value: None
1312 */
1313static void mgsl_isr_io_pin( struct mgsl_struct *info )
1314{
1315 struct mgsl_icount *icount;
1316 u16 status = usc_InReg( info, MISR );
1317
1318 if ( debug_level >= DEBUG_LEVEL_ISR )
1319 printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
1320 __FILE__,__LINE__,status);
1321
1322 usc_ClearIrqPendingBits( info, IO_PIN );
1323 usc_UnlatchIostatusBits( info, status );
1324
1325 if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
1326 MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
1327 icount = &info->icount;
1328 /* update input line counters */
1329 if (status & MISCSTATUS_RI_LATCHED) {
1330 if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1331 usc_DisablestatusIrqs(info,SICR_RI);
1332 icount->rng++;
1333 if ( status & MISCSTATUS_RI )
1334 info->input_signal_events.ri_up++;
1335 else
1336 info->input_signal_events.ri_down++;
1337 }
1338 if (status & MISCSTATUS_DSR_LATCHED) {
1339 if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1340 usc_DisablestatusIrqs(info,SICR_DSR);
1341 icount->dsr++;
1342 if ( status & MISCSTATUS_DSR )
1343 info->input_signal_events.dsr_up++;
1344 else
1345 info->input_signal_events.dsr_down++;
1346 }
1347 if (status & MISCSTATUS_DCD_LATCHED) {
1348 if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1349 usc_DisablestatusIrqs(info,SICR_DCD);
1350 icount->dcd++;
1351 if (status & MISCSTATUS_DCD) {
1352 info->input_signal_events.dcd_up++;
1353 } else
1354 info->input_signal_events.dcd_down++;
1355#ifdef CONFIG_HDLC
1356 if (info->netcount)
1357 hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev);
1358#endif
1359 }
1360 if (status & MISCSTATUS_CTS_LATCHED)
1361 {
1362 if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
1363 usc_DisablestatusIrqs(info,SICR_CTS);
1364 icount->cts++;
1365 if ( status & MISCSTATUS_CTS )
1366 info->input_signal_events.cts_up++;
1367 else
1368 info->input_signal_events.cts_down++;
1369 }
1370 wake_up_interruptible(&info->status_event_wait_q);
1371 wake_up_interruptible(&info->event_wait_q);
1372
1373 if ( (info->flags & ASYNC_CHECK_CD) &&
1374 (status & MISCSTATUS_DCD_LATCHED) ) {
1375 if ( debug_level >= DEBUG_LEVEL_ISR )
1376 printk("%s CD now %s...", info->device_name,
1377 (status & MISCSTATUS_DCD) ? "on" : "off");
1378 if (status & MISCSTATUS_DCD)
1379 wake_up_interruptible(&info->open_wait);
1380 else {
1381 if ( debug_level >= DEBUG_LEVEL_ISR )
1382 printk("doing serial hangup...");
1383 if (info->tty)
1384 tty_hangup(info->tty);
1385 }
1386 }
1387
1388 if ( (info->flags & ASYNC_CTS_FLOW) &&
1389 (status & MISCSTATUS_CTS_LATCHED) ) {
1390 if (info->tty->hw_stopped) {
1391 if (status & MISCSTATUS_CTS) {
1392 if ( debug_level >= DEBUG_LEVEL_ISR )
1393 printk("CTS tx start...");
1394 if (info->tty)
1395 info->tty->hw_stopped = 0;
1396 usc_start_transmitter(info);
1397 info->pending_bh |= BH_TRANSMIT;
1398 return;
1399 }
1400 } else {
1401 if (!(status & MISCSTATUS_CTS)) {
1402 if ( debug_level >= DEBUG_LEVEL_ISR )
1403 printk("CTS tx stop...");
1404 if (info->tty)
1405 info->tty->hw_stopped = 1;
1406 usc_stop_transmitter(info);
1407 }
1408 }
1409 }
1410 }
1411
1412 info->pending_bh |= BH_STATUS;
1413
1414 /* for diagnostics set IRQ flag */
1415 if ( status & MISCSTATUS_TXC_LATCHED ){
1416 usc_OutReg( info, SICR,
1417 (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
1418 usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
1419 info->irq_occurred = 1;
1420 }
1421
1422} /* end of mgsl_isr_io_pin() */
1423
1424/* mgsl_isr_transmit_data()
1425 *
1426 * Service a transmit data interrupt (async mode only).
1427 *
1428 * Arguments: info pointer to device instance data
1429 * Return Value: None
1430 */
1431static void mgsl_isr_transmit_data( struct mgsl_struct *info )
1432{
1433 if ( debug_level >= DEBUG_LEVEL_ISR )
1434 printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
1435 __FILE__,__LINE__,info->xmit_cnt);
1436
1437 usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
1438
1439 if (info->tty->stopped || info->tty->hw_stopped) {
1440 usc_stop_transmitter(info);
1441 return;
1442 }
1443
1444 if ( info->xmit_cnt )
1445 usc_load_txfifo( info );
1446 else
1447 info->tx_active = 0;
1448
1449 if (info->xmit_cnt < WAKEUP_CHARS)
1450 info->pending_bh |= BH_TRANSMIT;
1451
1452} /* end of mgsl_isr_transmit_data() */
1453
1454/* mgsl_isr_receive_data()
1455 *
1456 * Service a receive data interrupt. This occurs
1457 * when operating in asynchronous interrupt transfer mode.
1458 * The receive data FIFO is flushed to the receive data buffers.
1459 *
1460 * Arguments: info pointer to device instance data
1461 * Return Value: None
1462 */
1463static void mgsl_isr_receive_data( struct mgsl_struct *info )
1464{
1465 int Fifocount;
1466 u16 status;
1467 unsigned char DataByte;
1468 struct tty_struct *tty = info->tty;
1469 struct mgsl_icount *icount = &info->icount;
1470
1471 if ( debug_level >= DEBUG_LEVEL_ISR )
1472 printk("%s(%d):mgsl_isr_receive_data\n",
1473 __FILE__,__LINE__);
1474
1475 usc_ClearIrqPendingBits( info, RECEIVE_DATA );
1476
1477 /* select FIFO status for RICR readback */
1478 usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
1479
1480 /* clear the Wordstatus bit so that status readback */
1481 /* only reflects the status of this byte */
1482 usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
1483
1484 /* flush the receive FIFO */
1485
1486 while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
1487 /* read one byte from RxFIFO */
1488 outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
1489 info->io_base + CCAR );
1490 DataByte = inb( info->io_base + CCAR );
1491
1492 /* get the status of the received byte */
1493 status = usc_InReg(info, RCSR);
1494 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1495 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
1496 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
1497
1498 if (tty->flip.count >= TTY_FLIPBUF_SIZE)
1499 continue;
1500
1501 *tty->flip.char_buf_ptr = DataByte;
1502 icount->rx++;
1503
1504 *tty->flip.flag_buf_ptr = 0;
1505 if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
1506 RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
1507 printk("rxerr=%04X\n",status);
1508 /* update error statistics */
1509 if ( status & RXSTATUS_BREAK_RECEIVED ) {
1510 status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
1511 icount->brk++;
1512 } else if (status & RXSTATUS_PARITY_ERROR)
1513 icount->parity++;
1514 else if (status & RXSTATUS_FRAMING_ERROR)
1515 icount->frame++;
1516 else if (status & RXSTATUS_OVERRUN) {
1517 /* must issue purge fifo cmd before */
1518 /* 16C32 accepts more receive chars */
1519 usc_RTCmd(info,RTCmd_PurgeRxFifo);
1520 icount->overrun++;
1521 }
1522
1523 /* discard char if tty control flags say so */
1524 if (status & info->ignore_status_mask)
1525 continue;
1526
1527 status &= info->read_status_mask;
1528
1529 if (status & RXSTATUS_BREAK_RECEIVED) {
1530 *tty->flip.flag_buf_ptr = TTY_BREAK;
1531 if (info->flags & ASYNC_SAK)
1532 do_SAK(tty);
1533 } else if (status & RXSTATUS_PARITY_ERROR)
1534 *tty->flip.flag_buf_ptr = TTY_PARITY;
1535 else if (status & RXSTATUS_FRAMING_ERROR)
1536 *tty->flip.flag_buf_ptr = TTY_FRAME;
1537 if (status & RXSTATUS_OVERRUN) {
1538 /* Overrun is special, since it's
1539 * reported immediately, and doesn't
1540 * affect the current character
1541 */
1542 if (tty->flip.count < TTY_FLIPBUF_SIZE) {
1543 tty->flip.count++;
1544 tty->flip.flag_buf_ptr++;
1545 tty->flip.char_buf_ptr++;
1546 *tty->flip.flag_buf_ptr = TTY_OVERRUN;
1547 }
1548 }
1549 } /* end of if (error) */
1550
1551 tty->flip.flag_buf_ptr++;
1552 tty->flip.char_buf_ptr++;
1553 tty->flip.count++;
1554 }
1555
1556 if ( debug_level >= DEBUG_LEVEL_ISR ) {
1557 printk("%s(%d):mgsl_isr_receive_data flip count=%d\n",
1558 __FILE__,__LINE__,tty->flip.count);
1559 printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
1560 __FILE__,__LINE__,icount->rx,icount->brk,
1561 icount->parity,icount->frame,icount->overrun);
1562 }
1563
1564 if ( tty->flip.count )
1565 tty_flip_buffer_push(tty);
1566}
1567
1568/* mgsl_isr_misc()
1569 *
1570 * Service a miscellaneous interrupt source.
1571 *
1572 * Arguments: info pointer to device extension (instance data)
1573 * Return Value: None
1574 */
1575static void mgsl_isr_misc( struct mgsl_struct *info )
1576{
1577 u16 status = usc_InReg( info, MISR );
1578
1579 if ( debug_level >= DEBUG_LEVEL_ISR )
1580 printk("%s(%d):mgsl_isr_misc status=%04X\n",
1581 __FILE__,__LINE__,status);
1582
1583 if ((status & MISCSTATUS_RCC_UNDERRUN) &&
1584 (info->params.mode == MGSL_MODE_HDLC)) {
1585
1586 /* turn off receiver and rx DMA */
1587 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
1588 usc_DmaCmd(info, DmaCmd_ResetRxChannel);
1589 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
1590 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
1591 usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
1592
1593 /* schedule BH handler to restart receiver */
1594 info->pending_bh |= BH_RECEIVE;
1595 info->rx_rcc_underrun = 1;
1596 }
1597
1598 usc_ClearIrqPendingBits( info, MISC );
1599 usc_UnlatchMiscstatusBits( info, status );
1600
1601} /* end of mgsl_isr_misc() */
1602
1603/* mgsl_isr_null()
1604 *
1605 * Services undefined interrupt vectors from the
1606 * USC. (hence this function SHOULD never be called)
1607 *
1608 * Arguments: info pointer to device extension (instance data)
1609 * Return Value: None
1610 */
1611static void mgsl_isr_null( struct mgsl_struct *info )
1612{
1613
1614} /* end of mgsl_isr_null() */
1615
1616/* mgsl_isr_receive_dma()
1617 *
1618 * Service a receive DMA channel interrupt.
1619 * For this driver there are two sources of receive DMA interrupts
1620 * as identified in the Receive DMA mode Register (RDMR):
1621 *
1622 * BIT3 EOA/EOL End of List, all receive buffers in receive
1623 * buffer list have been filled (no more free buffers
1624 * available). The DMA controller has shut down.
1625 *
1626 * BIT2 EOB End of Buffer. This interrupt occurs when a receive
1627 * DMA buffer is terminated in response to completion
1628 * of a good frame or a frame with errors. The status
1629 * of the frame is stored in the buffer entry in the
1630 * list of receive buffer entries.
1631 *
1632 * Arguments: info pointer to device instance data
1633 * Return Value: None
1634 */
1635static void mgsl_isr_receive_dma( struct mgsl_struct *info )
1636{
1637 u16 status;
1638
1639 /* clear interrupt pending and IUS bit for Rx DMA IRQ */
1640 usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
1641
1642 /* Read the receive DMA status to identify interrupt type. */
1643 /* This also clears the status bits. */
1644 status = usc_InDmaReg( info, RDMR );
1645
1646 if ( debug_level >= DEBUG_LEVEL_ISR )
1647 printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
1648 __FILE__,__LINE__,info->device_name,status);
1649
1650 info->pending_bh |= BH_RECEIVE;
1651
1652 if ( status & BIT3 ) {
1653 info->rx_overflow = 1;
1654 info->icount.buf_overrun++;
1655 }
1656
1657} /* end of mgsl_isr_receive_dma() */
1658
1659/* mgsl_isr_transmit_dma()
1660 *
1661 * This function services a transmit DMA channel interrupt.
1662 *
1663 * For this driver there is one source of transmit DMA interrupts
1664 * as identified in the Transmit DMA Mode Register (TDMR):
1665 *
1666 * BIT2 EOB End of Buffer. This interrupt occurs when a
1667 * transmit DMA buffer has been emptied.
1668 *
1669 * The driver maintains enough transmit DMA buffers to hold at least
1670 * one max frame size transmit frame. When operating in a buffered
1671 * transmit mode, there may be enough transmit DMA buffers to hold at
1672 * least two or more max frame size frames. On an EOB condition,
1673 * determine if there are any queued transmit buffers and copy them
1674 * into the transmit DMA buffers if we have room.
1675 *
1676 * Arguments: info pointer to device instance data
1677 * Return Value: None
1678 */
1679static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
1680{
1681 u16 status;
1682
1683 /* clear interrupt pending and IUS bit for Tx DMA IRQ */
1684 usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
1685
1686 /* Read the transmit DMA status to identify interrupt type. */
1687 /* This also clears the status bits. */
1688
1689 status = usc_InDmaReg( info, TDMR );
1690
1691 if ( debug_level >= DEBUG_LEVEL_ISR )
1692 printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
1693 __FILE__,__LINE__,info->device_name,status);
1694
1695 if ( status & BIT2 ) {
1696 --info->tx_dma_buffers_used;
1697
1698 /* if there are transmit frames queued,
1699 * try to load the next one
1700 */
1701 if ( load_next_tx_holding_buffer(info) ) {
1702 /* if call returns non-zero value, we have
1703 * at least one free tx holding buffer
1704 */
1705 info->pending_bh |= BH_TRANSMIT;
1706 }
1707 }
1708
1709} /* end of mgsl_isr_transmit_dma() */
1710
1711/* mgsl_interrupt()
1712 *
1713 * Interrupt service routine entry point.
1714 *
1715 * Arguments:
1716 *
1717 * irq interrupt number that caused interrupt
1718 * dev_id device ID supplied during interrupt registration
1719 * regs interrupted processor context
1720 *
1721 * Return Value: IRQ_HANDLED if the interrupt was serviced, otherwise IRQ_NONE
1722 */
1723static irqreturn_t mgsl_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1724{
1725 struct mgsl_struct * info;
1726 u16 UscVector;
1727 u16 DmaVector;
1728
1729 if ( debug_level >= DEBUG_LEVEL_ISR )
1730 printk("%s(%d):mgsl_interrupt(%d)entry.\n",
1731 __FILE__,__LINE__,irq);
1732
1733 info = (struct mgsl_struct *)dev_id;
1734 if (!info)
1735 return IRQ_NONE;
1736
1737 spin_lock(&info->irq_spinlock);
1738
1739 for(;;) {
1740 /* Read the interrupt vectors from hardware. */
1741 UscVector = usc_InReg(info, IVR) >> 9;
1742 DmaVector = usc_InDmaReg(info, DIVR);
1743
1744 if ( debug_level >= DEBUG_LEVEL_ISR )
1745 printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
1746 __FILE__,__LINE__,info->device_name,UscVector,DmaVector);
1747
1748 if ( !UscVector && !DmaVector )
1749 break;
1750
1751 /* Dispatch interrupt vector */
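		/* a non-zero USC vector indexes UscIsrTable; for DMA vectors, bit 10 */
		/* set with bit 9 clear means transmit DMA, otherwise receive DMA */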
1752 if ( UscVector )
1753 (*UscIsrTable[UscVector])(info);
1754 else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
1755 mgsl_isr_transmit_dma(info);
1756 else
1757 mgsl_isr_receive_dma(info);
1758
1759 if ( info->isr_overflow ) {
1760 printk(KERN_ERR"%s(%d):%s isr overflow irq=%d\n",
1761 __FILE__,__LINE__,info->device_name, irq);
1762 usc_DisableMasterIrqBit(info);
1763 usc_DisableDmaInterrupts(info,DICR_MASTER);
1764 break;
1765 }
1766 }
1767
1768 /* Request bottom half processing if there's something
1769 * for it to do and the bh is not already running
1770 */
1771
1772 if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
1773 if ( debug_level >= DEBUG_LEVEL_ISR )
1774 printk("%s(%d):%s queueing bh task.\n",
1775 __FILE__,__LINE__,info->device_name);
1776 schedule_work(&info->task);
1777 info->bh_requested = 1;
1778 }
1779
1780 spin_unlock(&info->irq_spinlock);
1781
1782 if ( debug_level >= DEBUG_LEVEL_ISR )
1783 printk("%s(%d):mgsl_interrupt(%d)exit.\n",
1784 __FILE__,__LINE__,irq);
1785 return IRQ_HANDLED;
1786} /* end of mgsl_interrupt() */
1787
1788/* startup()
1789 *
1790 * Initialize and start device.
1791 *
1792 * Arguments: info pointer to device instance data
1793 * Return Value: 0 if success, otherwise error code
1794 */
1795static int startup(struct mgsl_struct * info)
1796{
1797 int retval = 0;
1798
1799 if ( debug_level >= DEBUG_LEVEL_INFO )
1800 printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
1801
1802 if (info->flags & ASYNC_INITIALIZED)
1803 return 0;
1804
1805 if (!info->xmit_buf) {
1806 /* allocate a page of memory for a transmit buffer */
1807 info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
1808 if (!info->xmit_buf) {
1809 printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
1810 __FILE__,__LINE__,info->device_name);
1811 return -ENOMEM;
1812 }
1813 }
1814
1815 info->pending_bh = 0;
1816
1817 init_timer(&info->tx_timer);
1818 info->tx_timer.data = (unsigned long)info;
1819 info->tx_timer.function = mgsl_tx_timeout;
1820
1821 /* Allocate and claim adapter resources */
1822 retval = mgsl_claim_resources(info);
1823
1824 /* perform existence check and diagnostics */
1825 if ( !retval )
1826 retval = mgsl_adapter_test(info);
1827
1828 if ( retval ) {
1829 if (capable(CAP_SYS_ADMIN) && info->tty)
1830 set_bit(TTY_IO_ERROR, &info->tty->flags);
1831 mgsl_release_resources(info);
1832 return retval;
1833 }
1834
1835 /* program hardware for current parameters */
1836 mgsl_change_params(info);
1837
1838 if (info->tty)
1839 clear_bit(TTY_IO_ERROR, &info->tty->flags);
1840
1841 info->flags |= ASYNC_INITIALIZED;
1842
1843 return 0;
1844
1845} /* end of startup() */
1846
1847/* shutdown()
1848 *
1849 * Called by mgsl_close() and mgsl_hangup() to shutdown hardware
1850 *
1851 * Arguments: info pointer to device instance data
1852 * Return Value: None
1853 */
1854static void shutdown(struct mgsl_struct * info)
1855{
1856 unsigned long flags;
1857
1858 if (!(info->flags & ASYNC_INITIALIZED))
1859 return;
1860
1861 if (debug_level >= DEBUG_LEVEL_INFO)
1862 printk("%s(%d):mgsl_shutdown(%s)\n",
1863 __FILE__,__LINE__, info->device_name );
1864
1865 /* clear status wait queue because status changes */
1866 /* can't happen after shutting down the hardware */
1867 wake_up_interruptible(&info->status_event_wait_q);
1868 wake_up_interruptible(&info->event_wait_q);
1869
1870 del_timer(&info->tx_timer);
1871
1872 if (info->xmit_buf) {
1873 free_page((unsigned long) info->xmit_buf);
1874 info->xmit_buf = NULL;
1875 }
1876
1877 spin_lock_irqsave(&info->irq_spinlock,flags);
1878 usc_DisableMasterIrqBit(info);
1879 usc_stop_receiver(info);
1880 usc_stop_transmitter(info);
1881 usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
1882 TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
1883 usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
1884
1885 /* Disable DMAEN (Port 7, Bit 14) */
1886 /* This disconnects the DMA request signal from the ISA bus */
1887 /* on the ISA adapter. This has no effect for the PCI adapter */
1888 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
1889
1890 /* Disable INTEN (Port 6, Bit12) */
1891 /* This disconnects the IRQ request signal to the ISA bus */
1892 /* on the ISA adapter. This has no effect for the PCI adapter */
1893 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
1894
1895 if (!info->tty || info->tty->termios->c_cflag & HUPCL) {
1896 info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
1897 usc_set_serial_signals(info);
1898 }
1899
1900 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1901
1902 mgsl_release_resources(info);
1903
1904 if (info->tty)
1905 set_bit(TTY_IO_ERROR, &info->tty->flags);
1906
1907 info->flags &= ~ASYNC_INITIALIZED;
1908
1909} /* end of shutdown() */
1910
1911static void mgsl_program_hw(struct mgsl_struct *info)
1912{
1913 unsigned long flags;
1914
1915 spin_lock_irqsave(&info->irq_spinlock,flags);
1916
1917 usc_stop_receiver(info);
1918 usc_stop_transmitter(info);
1919 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
1920
1921 if (info->params.mode == MGSL_MODE_HDLC ||
1922 info->params.mode == MGSL_MODE_RAW ||
1923 info->netcount)
1924 usc_set_sync_mode(info);
1925 else
1926 usc_set_async_mode(info);
1927
1928 usc_set_serial_signals(info);
1929
1930 info->dcd_chkcount = 0;
1931 info->cts_chkcount = 0;
1932 info->ri_chkcount = 0;
1933 info->dsr_chkcount = 0;
1934
1935 usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
1936 usc_EnableInterrupts(info, IO_PIN);
1937 usc_get_serial_signals(info);
1938
1939 if (info->netcount || info->tty->termios->c_cflag & CREAD)
1940 usc_start_receiver(info);
1941
1942 spin_unlock_irqrestore(&info->irq_spinlock,flags);
1943}
1944
1945/* Reconfigure adapter based on new parameters
1946 */
1947static void mgsl_change_params(struct mgsl_struct *info)
1948{
1949 unsigned cflag;
1950 int bits_per_char;
1951
1952 if (!info->tty || !info->tty->termios)
1953 return;
1954
1955 if (debug_level >= DEBUG_LEVEL_INFO)
1956 printk("%s(%d):mgsl_change_params(%s)\n",
1957 __FILE__,__LINE__, info->device_name );
1958
1959 cflag = info->tty->termios->c_cflag;
1960
1961 /* if B0 rate (hangup) specified then negate DTR and RTS */
1962 /* otherwise assert DTR and RTS */
1963 if (cflag & CBAUD)
1964 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
1965 else
1966 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
1967
1968 /* byte size and parity */
1969
1970 switch (cflag & CSIZE) {
1971 case CS5: info->params.data_bits = 5; break;
1972 case CS6: info->params.data_bits = 6; break;
1973 case CS7: info->params.data_bits = 7; break;
1974 case CS8: info->params.data_bits = 8; break;
1975 /* Never happens, but GCC is too dumb to figure it out */
1976 default: info->params.data_bits = 7; break;
1977 }
1978
1979 if (cflag & CSTOPB)
1980 info->params.stop_bits = 2;
1981 else
1982 info->params.stop_bits = 1;
1983
1984 info->params.parity = ASYNC_PARITY_NONE;
1985 if (cflag & PARENB) {
1986 if (cflag & PARODD)
1987 info->params.parity = ASYNC_PARITY_ODD;
1988 else
1989 info->params.parity = ASYNC_PARITY_EVEN;
1990#ifdef CMSPAR
1991 if (cflag & CMSPAR)
1992 info->params.parity = ASYNC_PARITY_SPACE;
1993#endif
1994 }
1995
1996 /* calculate number of jiffies to transmit a full
1997 * FIFO (32 bytes) at specified data rate
1998 */
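	/* data bits + stop bits + one start bit per character (parity, if any, */
	/* is not counted here) */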
1999 bits_per_char = info->params.data_bits +
2000 info->params.stop_bits + 1;
2001
2002 /* if port data rate is set to 460800 or less then
2003 * allow tty settings to override, otherwise keep the
2004 * current data rate.
2005 */
2006 if (info->params.data_rate <= 460800)
2007 info->params.data_rate = tty_get_baud_rate(info->tty);
2008
2009 if ( info->params.data_rate ) {
2010 info->timeout = (32*HZ*bits_per_char) /
2011 info->params.data_rate;
2012 }
2013 info->timeout += HZ/50; /* Add .02 seconds of slop */
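	/* example: 9600bps 8N1 is 10 bits/char, so (32*HZ*10)/9600 = HZ/30, */
	/* roughly 33 msec for a full FIFO plus the 20 msec slop */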
2014
2015 if (cflag & CRTSCTS)
2016 info->flags |= ASYNC_CTS_FLOW;
2017 else
2018 info->flags &= ~ASYNC_CTS_FLOW;
2019
2020 if (cflag & CLOCAL)
2021 info->flags &= ~ASYNC_CHECK_CD;
2022 else
2023 info->flags |= ASYNC_CHECK_CD;
2024
2025 /* process tty input control flags */
2026
2027 info->read_status_mask = RXSTATUS_OVERRUN;
2028 if (I_INPCK(info->tty))
2029 info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2030 if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
2031 info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
2032
2033 if (I_IGNPAR(info->tty))
2034 info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
2035 if (I_IGNBRK(info->tty)) {
2036 info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
2037 /* If ignoring parity and break indicators, ignore
2038 * overruns too. (For real raw support).
2039 */
2040 if (I_IGNPAR(info->tty))
2041 info->ignore_status_mask |= RXSTATUS_OVERRUN;
2042 }
2043
2044 mgsl_program_hw(info);
2045
2046} /* end of mgsl_change_params() */
2047
2048/* mgsl_put_char()
2049 *
2050 * Add a character to the transmit buffer.
2051 *
2052 * Arguments: tty pointer to tty information structure
2053 * ch character to add to transmit buffer
2054 *
2055 * Return Value: None
2056 */
2057static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
2058{
2059 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2060 unsigned long flags;
2061
2062 if ( debug_level >= DEBUG_LEVEL_INFO ) {
2063 printk( "%s(%d):mgsl_put_char(%d) on %s\n",
2064 __FILE__,__LINE__,ch,info->device_name);
2065 }
2066
2067 if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
2068 return;
2069
2070 if (!tty || !info->xmit_buf)
2071 return;
2072
2073 spin_lock_irqsave(&info->irq_spinlock,flags);
2074
2075 if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
2076
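		/* xmit_buf is a circular buffer; the mask below relies on */
		/* SERIAL_XMIT_SIZE being a power of two to wrap xmit_head */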
2077 if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
2078 info->xmit_buf[info->xmit_head++] = ch;
2079 info->xmit_head &= SERIAL_XMIT_SIZE-1;
2080 info->xmit_cnt++;
2081 }
2082 }
2083
2084 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2085
2086} /* end of mgsl_put_char() */
2087
2088/* mgsl_flush_chars()
2089 *
2090 * Enable transmitter so remaining characters in the
2091 * transmit buffer are sent.
2092 *
2093 * Arguments: tty pointer to tty information structure
2094 * Return Value: None
2095 */
2096static void mgsl_flush_chars(struct tty_struct *tty)
2097{
2098 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2099 unsigned long flags;
2100
2101 if ( debug_level >= DEBUG_LEVEL_INFO )
2102 printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
2103 __FILE__,__LINE__,info->device_name,info->xmit_cnt);
2104
2105 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
2106 return;
2107
2108 if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
2109 !info->xmit_buf)
2110 return;
2111
2112 if ( debug_level >= DEBUG_LEVEL_INFO )
2113 printk( "%s(%d):mgsl_flush_chars() entry on %s starting transmitter\n",
2114 __FILE__,__LINE__,info->device_name );
2115
2116 spin_lock_irqsave(&info->irq_spinlock,flags);
2117
2118 if (!info->tx_active) {
2119 if ( (info->params.mode == MGSL_MODE_HDLC ||
2120 info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
2121 /* operating in synchronous (frame oriented) mode */
2122 /* copy data from circular xmit_buf to */
2123 /* transmit DMA buffer. */
2124 mgsl_load_tx_dma_buffer(info,
2125 info->xmit_buf,info->xmit_cnt);
2126 }
2127 usc_start_transmitter(info);
2128 }
2129
2130 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2131
2132} /* end of mgsl_flush_chars() */
2133
2134/* mgsl_write()
2135 *
2136 * Send a block of data
2137 *
2138 * Arguments:
2139 *
2140 * tty pointer to tty information structure
2141 * buf pointer to buffer containing send data
2142 * count size of send data in bytes
2143 *
2144 * Return Value: number of characters written
2145 */
2146static int mgsl_write(struct tty_struct * tty,
2147 const unsigned char *buf, int count)
2148{
2149 int c, ret = 0;
2150 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2151 unsigned long flags;
2152
2153 if ( debug_level >= DEBUG_LEVEL_INFO )
2154 printk( "%s(%d):mgsl_write(%s) count=%d\n",
2155 __FILE__,__LINE__,info->device_name,count);
2156
2157 if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
2158 goto cleanup;
2159
2160 if (!tty || !info->xmit_buf || !tmp_buf)
2161 goto cleanup;
2162
2163 if ( info->params.mode == MGSL_MODE_HDLC ||
2164 info->params.mode == MGSL_MODE_RAW ) {
2165 /* operating in synchronous (frame oriented) mode */
2167 if (info->tx_active) {
2168
2169 if ( info->params.mode == MGSL_MODE_HDLC ) {
2170 ret = 0;
2171 goto cleanup;
2172 }
2173 /* transmitter is actively sending data -
2174 * if we have multiple transmit dma and
2175 * holding buffers, attempt to queue this
2176 * frame for transmission at a later time.
2177 */
2178 if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
2179 /* no tx holding buffers available */
2180 ret = 0;
2181 goto cleanup;
2182 }
2183
2184 /* queue transmit frame request */
2185 ret = count;
2186 save_tx_buffer_request(info,buf,count);
2187
2188 /* if we have sufficient tx dma buffers,
2189 * load the next buffered tx request
2190 */
2191 spin_lock_irqsave(&info->irq_spinlock,flags);
2192 load_next_tx_holding_buffer(info);
2193 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2194 goto cleanup;
2195 }
2196
2197 /* if operating in HDLC LoopMode and the adapter */
2198 /* has yet to be inserted into the loop, we can't */
2199 /* transmit */
2200
2201 if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
2202 !usc_loopmode_active(info) )
2203 {
2204 ret = 0;
2205 goto cleanup;
2206 }
2207
2208 if ( info->xmit_cnt ) {
2209 /* Send data accumulated from send_char() calls */
2210 /* as a frame and wait before accepting more data. */
2211 ret = 0;
2212
2213 /* copy data from circular xmit_buf to */
2214 /* transmit DMA buffer. */
2215 mgsl_load_tx_dma_buffer(info,
2216 info->xmit_buf,info->xmit_cnt);
2217 if ( debug_level >= DEBUG_LEVEL_INFO )
2218 printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
2219 __FILE__,__LINE__,info->device_name);
2220 } else {
2221 if ( debug_level >= DEBUG_LEVEL_INFO )
2222 printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
2223 __FILE__,__LINE__,info->device_name);
2224 ret = count;
2225 info->xmit_cnt = count;
2226 mgsl_load_tx_dma_buffer(info,buf,count);
2227 }
2228 } else {
2229 while (1) {
2230 spin_lock_irqsave(&info->irq_spinlock,flags);
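			/* limit copy to the caller's remaining data, the free space in */
			/* the ring, and the contiguous space before xmit_buf wraps */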
2231 c = min_t(int, count,
2232 min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
2233 SERIAL_XMIT_SIZE - info->xmit_head));
2234 if (c <= 0) {
2235 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2236 break;
2237 }
2238 memcpy(info->xmit_buf + info->xmit_head, buf, c);
2239 info->xmit_head = ((info->xmit_head + c) &
2240 (SERIAL_XMIT_SIZE-1));
2241 info->xmit_cnt += c;
2242 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2243 buf += c;
2244 count -= c;
2245 ret += c;
2246 }
2247 }
2248
2249 if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
2250 spin_lock_irqsave(&info->irq_spinlock,flags);
2251 if (!info->tx_active)
2252 usc_start_transmitter(info);
2253 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2254 }
2255cleanup:
2256 if ( debug_level >= DEBUG_LEVEL_INFO )
2257 printk( "%s(%d):mgsl_write(%s) returning=%d\n",
2258 __FILE__,__LINE__,info->device_name,ret);
2259
2260 return ret;
2261
2262} /* end of mgsl_write() */
2263
2264/* mgsl_write_room()
2265 *
2266 * Return the count of free bytes in transmit buffer
2267 *
2268 * Arguments: tty pointer to tty info structure
2269 * Return Value: count of free bytes in transmit buffer
2270 */
2271static int mgsl_write_room(struct tty_struct *tty)
2272{
2273 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2274 int ret;
2275
2276 if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
2277 return 0;
2278 ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
2279 if (ret < 0)
2280 ret = 0;
2281
2282 if (debug_level >= DEBUG_LEVEL_INFO)
2283 printk("%s(%d):mgsl_write_room(%s)=%d\n",
2284 __FILE__,__LINE__, info->device_name,ret );
2285
2286 if ( info->params.mode == MGSL_MODE_HDLC ||
2287 info->params.mode == MGSL_MODE_RAW ) {
2288 /* operating in synchronous (frame oriented) mode */
2289 if ( info->tx_active )
2290 return 0;
2291 else
2292 return HDLC_MAX_FRAME_SIZE;
2293 }
2294
2295 return ret;
2296
2297} /* end of mgsl_write_room() */
2298
2299/* mgsl_chars_in_buffer()
2300 *
2301 * Return the count of bytes in transmit buffer
2302 *
2303 * Arguments: tty pointer to tty info structure
2304 * Return Value: count of bytes in transmit buffer
2305 */
2306static int mgsl_chars_in_buffer(struct tty_struct *tty)
2307{
2308 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2309
2310 if (debug_level >= DEBUG_LEVEL_INFO)
2311 printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
2312 __FILE__,__LINE__, info->device_name );
2313
2314 if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
2315 return 0;
2316
2317 if (debug_level >= DEBUG_LEVEL_INFO)
2318 printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
2319 __FILE__,__LINE__, info->device_name,info->xmit_cnt );
2320
2321 if ( info->params.mode == MGSL_MODE_HDLC ||
2322 info->params.mode == MGSL_MODE_RAW ) {
2323 /* operating in synchronous (frame oriented) mode */
2324 if ( info->tx_active )
2325 return info->max_frame_size;
2326 else
2327 return 0;
2328 }
2329
2330 return info->xmit_cnt;
2331} /* end of mgsl_chars_in_buffer() */
2332
2333/* mgsl_flush_buffer()
2334 *
2335 * Discard all data in the send buffer
2336 *
2337 * Arguments: tty pointer to tty info structure
2338 * Return Value: None
2339 */
2340static void mgsl_flush_buffer(struct tty_struct *tty)
2341{
2342 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2343 unsigned long flags;
2344
2345 if (debug_level >= DEBUG_LEVEL_INFO)
2346 printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
2347 __FILE__,__LINE__, info->device_name );
2348
2349 if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
2350 return;
2351
2352 spin_lock_irqsave(&info->irq_spinlock,flags);
2353 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
2354 del_timer(&info->tx_timer);
2355 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2356
2357 wake_up_interruptible(&tty->write_wait);
2358 tty_wakeup(tty);
2359}
2360
2361/* mgsl_send_xchar()
2362 *
2363 * Send a high-priority XON/XOFF character
2364 *
2365 * Arguments: tty pointer to tty info structure
2366 * ch character to send
2367 * Return Value: None
2368 */
2369static void mgsl_send_xchar(struct tty_struct *tty, char ch)
2370{
2371 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2372 unsigned long flags;
2373
2374 if (debug_level >= DEBUG_LEVEL_INFO)
2375 printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
2376 __FILE__,__LINE__, info->device_name, ch );
2377
2378 if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
2379 return;
2380
2381 info->x_char = ch;
2382 if (ch) {
2383 /* Make sure transmit interrupts are on */
2384 spin_lock_irqsave(&info->irq_spinlock,flags);
2385 if (!info->tx_enabled)
2386 usc_start_transmitter(info);
2387 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2388 }
2389} /* end of mgsl_send_xchar() */
2390
2391/* mgsl_throttle()
2392 *
2393 * Signal remote device to throttle send data (our receive data)
2394 *
2395 * Arguments: tty pointer to tty info structure
2396 * Return Value: None
2397 */
2398static void mgsl_throttle(struct tty_struct * tty)
2399{
2400 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2401 unsigned long flags;
2402
2403 if (debug_level >= DEBUG_LEVEL_INFO)
2404 printk("%s(%d):mgsl_throttle(%s) entry\n",
2405 __FILE__,__LINE__, info->device_name );
2406
2407 if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
2408 return;
2409
2410 if (I_IXOFF(tty))
2411 mgsl_send_xchar(tty, STOP_CHAR(tty));
2412
2413 if (tty->termios->c_cflag & CRTSCTS) {
2414 spin_lock_irqsave(&info->irq_spinlock,flags);
2415 info->serial_signals &= ~SerialSignal_RTS;
2416 usc_set_serial_signals(info);
2417 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2418 }
2419} /* end of mgsl_throttle() */
2420
2421/* mgsl_unthrottle()
2422 *
2423 * Signal remote device to stop throttling send data (our receive data)
2424 *
2425 * Arguments: tty pointer to tty info structure
2426 * Return Value: None
2427 */
2428static void mgsl_unthrottle(struct tty_struct * tty)
2429{
2430 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2431 unsigned long flags;
2432
2433 if (debug_level >= DEBUG_LEVEL_INFO)
2434 printk("%s(%d):mgsl_unthrottle(%s) entry\n",
2435 __FILE__,__LINE__, info->device_name );
2436
2437 if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
2438 return;
2439
2440 if (I_IXOFF(tty)) {
2441 if (info->x_char)
2442 info->x_char = 0;
2443 else
2444 mgsl_send_xchar(tty, START_CHAR(tty));
2445 }
2446
2447 if (tty->termios->c_cflag & CRTSCTS) {
2448 spin_lock_irqsave(&info->irq_spinlock,flags);
2449 info->serial_signals |= SerialSignal_RTS;
2450 usc_set_serial_signals(info);
2451 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2452 }
2453
2454} /* end of mgsl_unthrottle() */
2455
2456/* mgsl_get_stats()
2457 *
2458 * get the current counts of serial line errors and events
2459 *
2460 * Arguments: info pointer to device instance data
2461 * user_icount pointer to buffer to hold returned stats
2462 *
2463 * Return Value: 0 if success, otherwise error code
2464 */
2465static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
2466{
2467 int err;
2468
2469 if (debug_level >= DEBUG_LEVEL_INFO)
2470 printk("%s(%d):mgsl_get_stats(%s)\n",
2471 __FILE__,__LINE__, info->device_name);
2472
2473 COPY_TO_USER(err,user_icount, &info->icount, sizeof(struct mgsl_icount));
2474 if (err) {
2475 if ( debug_level >= DEBUG_LEVEL_INFO )
2476 printk( "%s(%d):mgsl_get_stats(%s) user buffer copy failed\n",
2477 __FILE__,__LINE__,info->device_name);
2478 return -EFAULT;
2479 }
2480
2481 return 0;
2482
2483} /* end of mgsl_get_stats() */
2484
2485/* mgsl_get_params()
2486 *
2487 * get the current serial parameters information
2488 *
2489 * Arguments: info pointer to device instance data
2490 * user_params pointer to buffer to hold returned params
2491 *
2492 * Return Value: 0 if success, otherwise error code
2493 */
2494static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
2495{
2496 int err;
2497 if (debug_level >= DEBUG_LEVEL_INFO)
2498 printk("%s(%d):mgsl_get_params(%s)\n",
2499 __FILE__,__LINE__, info->device_name);
2500
2501 COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
2502 if (err) {
2503 if ( debug_level >= DEBUG_LEVEL_INFO )
2504 printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
2505 __FILE__,__LINE__,info->device_name);
2506 return -EFAULT;
2507 }
2508
2509 return 0;
2510
2511} /* end of mgsl_get_params() */
2512
2513/* mgsl_set_params()
2514 *
2515 * set the serial parameters
2516 *
2517 * Arguments:
2518 *
2519 * info pointer to device instance data
2520 * new_params user buffer containing new serial params
2521 *
2522 * Return Value: 0 if success, otherwise error code
2523 */
2524static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
2525{
2526 unsigned long flags;
2527 MGSL_PARAMS tmp_params;
2528 int err;
2529
2530 if (debug_level >= DEBUG_LEVEL_INFO)
2531 printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
2532 info->device_name );
2533 COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
2534 if (err) {
2535 if ( debug_level >= DEBUG_LEVEL_INFO )
2536 printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
2537 __FILE__,__LINE__,info->device_name);
2538 return -EFAULT;
2539 }
2540
2541 spin_lock_irqsave(&info->irq_spinlock,flags);
2542 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
2543 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2544
2545 mgsl_change_params(info);
2546
2547 return 0;
2548
2549} /* end of mgsl_set_params() */
2550
2551/* mgsl_get_txidle()
2552 *
2553 * get the current transmit idle mode
2554 *
2555 * Arguments: info pointer to device instance data
2556 * idle_mode pointer to buffer to hold returned idle mode
2557 *
2558 * Return Value: 0 if success, otherwise error code
2559 */
2560static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
2561{
2562 int err;
2563
2564 if (debug_level >= DEBUG_LEVEL_INFO)
2565 printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
2566 __FILE__,__LINE__, info->device_name, info->idle_mode);
2567
2568 COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
2569 if (err) {
2570 if ( debug_level >= DEBUG_LEVEL_INFO )
2571 printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
2572 __FILE__,__LINE__,info->device_name);
2573 return -EFAULT;
2574 }
2575
2576 return 0;
2577
2578} /* end of mgsl_get_txidle() */
2579
2580/* mgsl_set_txidle() services ioctl to set transmit idle mode
2581 *
2582 * Arguments: info pointer to device instance data
2583 * idle_mode new idle mode
2584 *
2585 * Return Value: 0 if success, otherwise error code
2586 */
2587static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
2588{
2589 unsigned long flags;
2590
2591 if (debug_level >= DEBUG_LEVEL_INFO)
2592 printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
2593 info->device_name, idle_mode );
2594
2595 spin_lock_irqsave(&info->irq_spinlock,flags);
2596 info->idle_mode = idle_mode;
2597 usc_set_txidle( info );
2598 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2599 return 0;
2600
2601} /* end of mgsl_set_txidle() */
2602
2603/* mgsl_txenable()
2604 *
2605 * enable or disable the transmitter
2606 *
2607 * Arguments:
2608 *
2609 * info pointer to device instance data
2610 * enable 1 = enable, 0 = disable
2611 *
2612 * Return Value: 0 if success, otherwise error code
2613 */
2614static int mgsl_txenable(struct mgsl_struct * info, int enable)
2615{
2616 unsigned long flags;
2617
2618 if (debug_level >= DEBUG_LEVEL_INFO)
2619 printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
2620 info->device_name, enable);
2621
2622 spin_lock_irqsave(&info->irq_spinlock,flags);
2623 if ( enable ) {
2624 if ( !info->tx_enabled ) {
2625
2626 usc_start_transmitter(info);
2627 /*--------------------------------------------------
2628 * if HDLC/SDLC Loop mode, attempt to insert the
2629 * station in the 'loop' by setting CMR:13. Upon
2630 * receipt of the next GoAhead (RxAbort) sequence,
2631 * the OnLoop indicator (CCSR:7) should go active
2632 * to indicate that we are on the loop
2633 *--------------------------------------------------*/
2634 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2635 usc_loopmode_insert_request( info );
2636 }
2637 } else {
2638 if ( info->tx_enabled )
2639 usc_stop_transmitter(info);
2640 }
2641 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2642 return 0;
2643
2644} /* end of mgsl_txenable() */
2645
2646/* mgsl_txabort() abort transmission of HDLC frame in progress
2647 *
2648 * Arguments: info pointer to device instance data
2649 * Return Value: 0 if success, otherwise error code
2650 */
2651static int mgsl_txabort(struct mgsl_struct * info)
2652{
2653 unsigned long flags;
2654
2655 if (debug_level >= DEBUG_LEVEL_INFO)
2656 printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
2657 info->device_name);
2658
2659 spin_lock_irqsave(&info->irq_spinlock,flags);
2660 if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
2661 {
2662 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
2663 usc_loopmode_cancel_transmit( info );
2664 else
2665 usc_TCmd(info,TCmd_SendAbort);
2666 }
2667 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2668 return 0;
2669
2670} /* end of mgsl_txabort() */
2671
2672/* mgsl_rxenable() enable or disable the receiver
2673 *
2674 * Arguments: info pointer to device instance data
2675 * enable 1 = enable, 0 = disable
2676 * Return Value: 0 if success, otherwise error code
2677 */
2678static int mgsl_rxenable(struct mgsl_struct * info, int enable)
2679{
2680 unsigned long flags;
2681
2682 if (debug_level >= DEBUG_LEVEL_INFO)
2683 printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
2684 info->device_name, enable);
2685
2686 spin_lock_irqsave(&info->irq_spinlock,flags);
2687 if ( enable ) {
2688 if ( !info->rx_enabled )
2689 usc_start_receiver(info);
2690 } else {
2691 if ( info->rx_enabled )
2692 usc_stop_receiver(info);
2693 }
2694 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2695 return 0;
2696
2697} /* end of mgsl_rxenable() */
2698
2699/* mgsl_wait_event() wait for specified event to occur
2700 *
2701 * Arguments: info pointer to device instance data
2702 * mask pointer to bitmask of events to wait for
2703 * Return Value: 0 if successful and bit mask updated with
2704 * the events triggered,
2705 * otherwise error code
2706 */
2707static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
2708{
2709 unsigned long flags;
2710 int s;
2711 int rc=0;
2712 struct mgsl_icount cprev, cnow;
2713 int events;
2714 int mask;
2715 struct _input_signal_events oldsigs, newsigs;
2716 DECLARE_WAITQUEUE(wait, current);
2717
2718 COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
2719 if (rc) {
2720 return -EFAULT;
2721 }
2722
2723 if (debug_level >= DEBUG_LEVEL_INFO)
2724 printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
2725 info->device_name, mask);
2726
2727 spin_lock_irqsave(&info->irq_spinlock,flags);
2728
2729 /* return immediately if state matches requested events */
2730 usc_get_serial_signals(info);
2731 s = info->serial_signals;
2732 events = mask &
2733 ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
2734 ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
2735 ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
2736 ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
2737 if (events) {
2738 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2739 goto exit;
2740 }
2741
2742 /* save current irq counts */
2743 cprev = info->icount;
2744 oldsigs = info->input_signal_events;
2745
2746 /* enable hunt and idle irqs if needed */
2747 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2748 u16 oldreg = usc_InReg(info,RICR);
2749 u16 newreg = oldreg +
2750 (mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
2751 (mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
2752 if (oldreg != newreg)
2753 usc_OutReg(info, RICR, newreg);
2754 }
2755
2756 set_current_state(TASK_INTERRUPTIBLE);
2757 add_wait_queue(&info->event_wait_q, &wait);
2758
2759 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2760
2761
2762 for(;;) {
2763 schedule();
2764 if (signal_pending(current)) {
2765 rc = -ERESTARTSYS;
2766 break;
2767 }
2768
2769 /* get current irq counts */
2770 spin_lock_irqsave(&info->irq_spinlock,flags);
2771 cnow = info->icount;
2772 newsigs = info->input_signal_events;
2773 set_current_state(TASK_INTERRUPTIBLE);
2774 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2775
2776 /* if no change, wait aborted for some reason */
2777 if (newsigs.dsr_up == oldsigs.dsr_up &&
2778 newsigs.dsr_down == oldsigs.dsr_down &&
2779 newsigs.dcd_up == oldsigs.dcd_up &&
2780 newsigs.dcd_down == oldsigs.dcd_down &&
2781 newsigs.cts_up == oldsigs.cts_up &&
2782 newsigs.cts_down == oldsigs.cts_down &&
2783 newsigs.ri_up == oldsigs.ri_up &&
2784 newsigs.ri_down == oldsigs.ri_down &&
2785 cnow.exithunt == cprev.exithunt &&
2786 cnow.rxidle == cprev.rxidle) {
2787 rc = -EIO;
2788 break;
2789 }
2790
2791 events = mask &
2792 ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
2793 (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
2794 (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
2795 (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
2796 (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
2797 (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
2798 (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
2799 (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
2800 (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
2801 (cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
2802 if (events)
2803 break;
2804
2805 cprev = cnow;
2806 oldsigs = newsigs;
2807 }
2808
2809 remove_wait_queue(&info->event_wait_q, &wait);
2810 set_current_state(TASK_RUNNING);
2811
2812 if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
2813 spin_lock_irqsave(&info->irq_spinlock,flags);
2814 if (!waitqueue_active(&info->event_wait_q)) {
2815 /* disable exit hunt mode/idle rcvd IRQs */
2816 usc_OutReg(info, RICR, usc_InReg(info,RICR) &
2817 ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
2818 }
2819 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2820 }
2821exit:
2822 if ( rc == 0 )
2823 PUT_USER(rc, events, mask_ptr);
2824
2825 return rc;
2826
2827} /* end of mgsl_wait_event() */
2828
2829static int modem_input_wait(struct mgsl_struct *info,int arg)
2830{
2831 unsigned long flags;
2832 int rc;
2833 struct mgsl_icount cprev, cnow;
2834 DECLARE_WAITQUEUE(wait, current);
2835
2836 /* save current irq counts */
2837 spin_lock_irqsave(&info->irq_spinlock,flags);
2838 cprev = info->icount;
2839 add_wait_queue(&info->status_event_wait_q, &wait);
2840 set_current_state(TASK_INTERRUPTIBLE);
2841 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2842
2843 for(;;) {
2844 schedule();
2845 if (signal_pending(current)) {
2846 rc = -ERESTARTSYS;
2847 break;
2848 }
2849
2850 /* get new irq counts */
2851 spin_lock_irqsave(&info->irq_spinlock,flags);
2852 cnow = info->icount;
2853 set_current_state(TASK_INTERRUPTIBLE);
2854 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2855
2856 /* if no change, wait aborted for some reason */
2857 if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
2858 cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
2859 rc = -EIO;
2860 break;
2861 }
2862
2863 /* check for change in caller specified modem input */
2864 if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
2865 (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
2866 (arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
2867 (arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
2868 rc = 0;
2869 break;
2870 }
2871
2872 cprev = cnow;
2873 }
2874 remove_wait_queue(&info->status_event_wait_q, &wait);
2875 set_current_state(TASK_RUNNING);
2876 return rc;
2877}
2878
2879/* return the state of the serial control and status signals
2880 */
2881static int tiocmget(struct tty_struct *tty, struct file *file)
2882{
2883 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2884 unsigned int result;
2885 unsigned long flags;
2886
2887 spin_lock_irqsave(&info->irq_spinlock,flags);
2888 usc_get_serial_signals(info);
2889 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2890
2891 result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
2892 ((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
2893 ((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
2894 ((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
2895 ((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
2896 ((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
2897
2898 if (debug_level >= DEBUG_LEVEL_INFO)
2899 printk("%s(%d):%s tiocmget() value=%08X\n",
2900 __FILE__,__LINE__, info->device_name, result );
2901 return result;
2902}
2903
2904/* set modem control signals (DTR/RTS)
2905 */
2906static int tiocmset(struct tty_struct *tty, struct file *file,
2907 unsigned int set, unsigned int clear)
2908{
2909 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
2910 unsigned long flags;
2911
2912 if (debug_level >= DEBUG_LEVEL_INFO)
2913 printk("%s(%d):%s tiocmset(%x,%x)\n",
2914 __FILE__,__LINE__,info->device_name, set, clear);
2915
2916 if (set & TIOCM_RTS)
2917 info->serial_signals |= SerialSignal_RTS;
2918 if (set & TIOCM_DTR)
2919 info->serial_signals |= SerialSignal_DTR;
2920 if (clear & TIOCM_RTS)
2921 info->serial_signals &= ~SerialSignal_RTS;
2922 if (clear & TIOCM_DTR)
2923 info->serial_signals &= ~SerialSignal_DTR;
2924
2925 spin_lock_irqsave(&info->irq_spinlock,flags);
2926 usc_set_serial_signals(info);
2927 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2928
2929 return 0;
2930}
2931
2932/* mgsl_break() Set or clear transmit break condition
2933 *
2934 * Arguments: tty pointer to tty instance data
2935 * break_state -1=set break condition, 0=clear
2936 * Return Value: None
2937 */
2938static void mgsl_break(struct tty_struct *tty, int break_state)
2939{
2940 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2941 unsigned long flags;
2942
2943 if (debug_level >= DEBUG_LEVEL_INFO)
2944 printk("%s(%d):mgsl_break(%s,%d)\n",
2945 __FILE__,__LINE__, info->device_name, break_state);
2946
2947 if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
2948 return;
2949
2950 spin_lock_irqsave(&info->irq_spinlock,flags);
2951 if (break_state == -1)
2952 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
2953 else
2954 usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
2955 spin_unlock_irqrestore(&info->irq_spinlock,flags);
2956
2957} /* end of mgsl_break() */
2958
2959/* mgsl_ioctl() Service an IOCTL request
2960 *
2961 * Arguments:
2962 *
2963 * tty pointer to tty instance data
2964 * file pointer to associated file object for device
2965 * cmd IOCTL command code
2966 * arg command argument/context
2967 *
2968 * Return Value: 0 if success, otherwise error code
2969 */
2970static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
2971 unsigned int cmd, unsigned long arg)
2972{
2973 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
2974
2975 if (debug_level >= DEBUG_LEVEL_INFO)
2976 printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
2977 info->device_name, cmd );
2978
2979 if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
2980 return -ENODEV;
2981
2982 if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
2983 (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
2984 if (tty->flags & (1 << TTY_IO_ERROR))
2985 return -EIO;
2986 }
2987
2988 return mgsl_ioctl_common(info, cmd, arg);
2989}
2990
2991static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
2992{
2993 int error;
2994 struct mgsl_icount cnow; /* kernel counter temps */
2995 void __user *argp = (void __user *)arg;
2996 struct serial_icounter_struct __user *p_cuser; /* user space */
2997 unsigned long flags;
2998
2999 switch (cmd) {
3000 case MGSL_IOCGPARAMS:
3001 return mgsl_get_params(info, argp);
3002 case MGSL_IOCSPARAMS:
3003 return mgsl_set_params(info, argp);
3004 case MGSL_IOCGTXIDLE:
3005 return mgsl_get_txidle(info, argp);
3006 case MGSL_IOCSTXIDLE:
3007 return mgsl_set_txidle(info,(int)arg);
3008 case MGSL_IOCTXENABLE:
3009 return mgsl_txenable(info,(int)arg);
3010 case MGSL_IOCRXENABLE:
3011 return mgsl_rxenable(info,(int)arg);
3012 case MGSL_IOCTXABORT:
3013 return mgsl_txabort(info);
3014 case MGSL_IOCGSTATS:
3015 return mgsl_get_stats(info, argp);
3016 case MGSL_IOCWAITEVENT:
3017 return mgsl_wait_event(info, argp);
3018 case MGSL_IOCLOOPTXDONE:
3019 return mgsl_loopmode_send_done(info);
3020 /* Wait for modem input (DCD,RI,DSR,CTS) change
3021 * as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
3022 */
3023 case TIOCMIWAIT:
3024 return modem_input_wait(info,(int)arg);
3025
3026 /*
3027 * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
3028 * Return: write counters to the user passed counter struct
3029 * NB: both 1->0 and 0->1 transitions are counted except for
3030 * RI where only 0->1 is counted.
3031 */
3032 case TIOCGICOUNT:
3033 spin_lock_irqsave(&info->irq_spinlock,flags);
3034 cnow = info->icount;
3035 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3036 p_cuser = argp;
3037 PUT_USER(error,cnow.cts, &p_cuser->cts);
3038 if (error) return error;
3039 PUT_USER(error,cnow.dsr, &p_cuser->dsr);
3040 if (error) return error;
3041 PUT_USER(error,cnow.rng, &p_cuser->rng);
3042 if (error) return error;
3043 PUT_USER(error,cnow.dcd, &p_cuser->dcd);
3044 if (error) return error;
3045 PUT_USER(error,cnow.rx, &p_cuser->rx);
3046 if (error) return error;
3047 PUT_USER(error,cnow.tx, &p_cuser->tx);
3048 if (error) return error;
3049 PUT_USER(error,cnow.frame, &p_cuser->frame);
3050 if (error) return error;
3051 PUT_USER(error,cnow.overrun, &p_cuser->overrun);
3052 if (error) return error;
3053 PUT_USER(error,cnow.parity, &p_cuser->parity);
3054 if (error) return error;
3055 PUT_USER(error,cnow.brk, &p_cuser->brk);
3056 if (error) return error;
3057 PUT_USER(error,cnow.buf_overrun, &p_cuser->buf_overrun);
3058 if (error) return error;
3059 return 0;
3060 default:
3061 return -ENOIOCTLCMD;
3062 }
3063 return 0;
3064}
3065
3066/* mgsl_set_termios()
3067 *
3068 * Set new termios settings
3069 *
3070 * Arguments:
3071 *
3072 * tty pointer to tty structure
3073 * old_termios pointer to the previous termios settings
3074 *
3075 * Return Value: None
3076 */
3077static void mgsl_set_termios(struct tty_struct *tty, struct termios *old_termios)
3078{
3079 struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
3080 unsigned long flags;
3081
3082 if (debug_level >= DEBUG_LEVEL_INFO)
3083 printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
3084 tty->driver->name );
3085
3086 /* just return if nothing has changed */
3087 if ((tty->termios->c_cflag == old_termios->c_cflag)
3088 && (RELEVANT_IFLAG(tty->termios->c_iflag)
3089 == RELEVANT_IFLAG(old_termios->c_iflag)))
3090 return;
3091
3092 mgsl_change_params(info);
3093
3094 /* Handle transition to B0 status */
3095 if (old_termios->c_cflag & CBAUD &&
3096 !(tty->termios->c_cflag & CBAUD)) {
3097 info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);
3098 spin_lock_irqsave(&info->irq_spinlock,flags);
3099 usc_set_serial_signals(info);
3100 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3101 }
3102
3103 /* Handle transition away from B0 status */
3104 if (!(old_termios->c_cflag & CBAUD) &&
3105 tty->termios->c_cflag & CBAUD) {
3106 info->serial_signals |= SerialSignal_DTR;
3107 if (!(tty->termios->c_cflag & CRTSCTS) ||
3108 !test_bit(TTY_THROTTLED, &tty->flags)) {
3109 info->serial_signals |= SerialSignal_RTS;
3110 }
3111 spin_lock_irqsave(&info->irq_spinlock,flags);
3112 usc_set_serial_signals(info);
3113 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3114 }
3115
3116 /* Handle turning off CRTSCTS */
3117 if (old_termios->c_cflag & CRTSCTS &&
3118 !(tty->termios->c_cflag & CRTSCTS)) {
3119 tty->hw_stopped = 0;
3120 mgsl_start(tty);
3121 }
3122
3123} /* end of mgsl_set_termios() */
3124
3125/* mgsl_close()
3126 *
3127 * Called when port is closed. Wait for remaining data to be
3128 * sent. Disable port and free resources.
3129 *
3130 * Arguments:
3131 *
3132 * tty pointer to open tty structure
3133 * filp pointer to open file object
3134 *
3135 * Return Value: None
3136 */
3137static void mgsl_close(struct tty_struct *tty, struct file * filp)
3138{
3139 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3140
3141 if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
3142 return;
3143
3144 if (debug_level >= DEBUG_LEVEL_INFO)
3145 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
3146 __FILE__,__LINE__, info->device_name, info->count);
3147
3148 if (!info->count)
3149 return;
3150
3151 if (tty_hung_up_p(filp))
3152 goto cleanup;
3153
3154 if ((tty->count == 1) && (info->count != 1)) {
3155 /*
3156 * tty->count is 1 and the tty structure will be freed.
3157 * info->count should be one in this case.
3158 * If it's not, correct it so that the port is shut down.
3159 */
3160 printk("mgsl_close: bad refcount; tty->count is 1, "
3161 "info->count is %d\n", info->count);
3162 info->count = 1;
3163 }
3164
3165 info->count--;
3166
3167 /* if at least one open remaining, leave hardware active */
3168 if (info->count)
3169 goto cleanup;
3170
3171 info->flags |= ASYNC_CLOSING;
3172
3173 /* set tty->closing to notify line discipline to
3174 * only process XON/XOFF characters. Only the N_TTY
3175 * discipline appears to use this (ppp does not).
3176 */
3177 tty->closing = 1;
3178
3179 /* wait for transmit data to clear all layers */
3180
3181 if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE) {
3182 if (debug_level >= DEBUG_LEVEL_INFO)
3183 printk("%s(%d):mgsl_close(%s) calling tty_wait_until_sent\n",
3184 __FILE__,__LINE__, info->device_name );
3185 tty_wait_until_sent(tty, info->closing_wait);
3186 }
3187
3188 if (info->flags & ASYNC_INITIALIZED)
3189 mgsl_wait_until_sent(tty, info->timeout);
3190
3191 if (tty->driver->flush_buffer)
3192 tty->driver->flush_buffer(tty);
3193
3194 tty_ldisc_flush(tty);
3195
3196 shutdown(info);
3197
3198 tty->closing = 0;
3199 info->tty = NULL;
3200
3201 if (info->blocked_open) {
3202 if (info->close_delay) {
3203 msleep_interruptible(jiffies_to_msecs(info->close_delay));
3204 }
3205 wake_up_interruptible(&info->open_wait);
3206 }
3207
3208 info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
3209
3210 wake_up_interruptible(&info->close_wait);
3211
3212cleanup:
3213 if (debug_level >= DEBUG_LEVEL_INFO)
3214 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
3215 tty->driver->name, info->count);
3216
3217} /* end of mgsl_close() */
3218
3219/* mgsl_wait_until_sent()
3220 *
3221 * Wait until the transmitter is empty.
3222 *
3223 * Arguments:
3224 *
3225 * tty pointer to tty info structure
3226 * timeout time to wait for send completion
3227 *
3228 * Return Value: None
3229 */
3230static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
3231{
3232 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3233 unsigned long orig_jiffies, char_time;
3234
3235 if (!info )
3236 return;
3237
3238 if (debug_level >= DEBUG_LEVEL_INFO)
3239 printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
3240 __FILE__,__LINE__, info->device_name );
3241
3242 if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
3243 return;
3244
3245 if (!(info->flags & ASYNC_INITIALIZED))
3246 goto exit;
3247
3248 orig_jiffies = jiffies;
3249
3250 /* Set check interval to 1/5 of estimated time to
3251 * send a character, and make it at least 1. The check
3252 * interval should also be less than the timeout.
3253 * Note: use tight timings here to satisfy the NIST-PCTS.
3254 */
3255
3256 if ( info->params.data_rate ) {
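		/* info->timeout covers a 32 byte FIFO, so divide by 32 for one */
		/* character time, then by 5 for the check interval */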
3257 char_time = info->timeout/(32 * 5);
3258 if (!char_time)
3259 char_time++;
3260 } else
3261 char_time = 1;
3262
3263 if (timeout)
3264 char_time = min_t(unsigned long, char_time, timeout);
3265
3266 if ( info->params.mode == MGSL_MODE_HDLC ||
3267 info->params.mode == MGSL_MODE_RAW ) {
3268 while (info->tx_active) {
3269 msleep_interruptible(jiffies_to_msecs(char_time));
3270 if (signal_pending(current))
3271 break;
3272 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3273 break;
3274 }
3275 } else {
3276 while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
3277 info->tx_enabled) {
3278 msleep_interruptible(jiffies_to_msecs(char_time));
3279 if (signal_pending(current))
3280 break;
3281 if (timeout && time_after(jiffies, orig_jiffies + timeout))
3282 break;
3283 }
3284 }
3285
3286exit:
3287 if (debug_level >= DEBUG_LEVEL_INFO)
3288 printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
3289 __FILE__,__LINE__, info->device_name );
3290
3291} /* end of mgsl_wait_until_sent() */
3292
3293/* mgsl_hangup()
3294 *
3295 * Called by tty_hangup() when a hangup is signaled.
3296 * This is the same as closing all open files for the port.
3297 *
3298 * Arguments: tty pointer to associated tty object
3299 * Return Value: None
3300 */
3301static void mgsl_hangup(struct tty_struct *tty)
3302{
3303 struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
3304
3305 if (debug_level >= DEBUG_LEVEL_INFO)
3306 printk("%s(%d):mgsl_hangup(%s)\n",
3307 __FILE__,__LINE__, info->device_name );
3308
3309 if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
3310 return;
3311
3312 mgsl_flush_buffer(tty);
3313 shutdown(info);
3314
3315 info->count = 0;
3316 info->flags &= ~ASYNC_NORMAL_ACTIVE;
3317 info->tty = NULL;
3318
3319 wake_up_interruptible(&info->open_wait);
3320
3321} /* end of mgsl_hangup() */
3322
3323/* block_til_ready()
3324 *
3325 * Block the current process until the specified port
3326 * is ready to be opened.
3327 *
3328 * Arguments:
3329 *
3330 * tty pointer to tty info structure
3331 * filp pointer to open file object
3332 * info pointer to device instance data
3333 *
3334 * Return Value: 0 if success, otherwise error code
3335 */
3336static int block_til_ready(struct tty_struct *tty, struct file * filp,
3337 struct mgsl_struct *info)
3338{
3339 DECLARE_WAITQUEUE(wait, current);
3340 int retval;
3341 int do_clocal = 0, extra_count = 0;
3342 unsigned long flags;
3343
3344 if (debug_level >= DEBUG_LEVEL_INFO)
3345 printk("%s(%d):block_til_ready on %s\n",
3346 __FILE__,__LINE__, tty->driver->name );
3347
3348 if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
3349 /* nonblock mode is set or port is not enabled */
3350 info->flags |= ASYNC_NORMAL_ACTIVE;
3351 return 0;
3352 }
3353
3354 if (tty->termios->c_cflag & CLOCAL)
3355 do_clocal = 1;
3356
3357 /* Wait for carrier detect and the line to become
3358 * free (i.e., not in use by the callout). While we are in
3359 * this loop, info->count is dropped by one, so that
3360 * mgsl_close() knows when to free things. We restore it upon
3361 * exit, either normal or abnormal.
3362 */
3363
3364 retval = 0;
3365 add_wait_queue(&info->open_wait, &wait);
3366
3367 if (debug_level >= DEBUG_LEVEL_INFO)
3368 printk("%s(%d):block_til_ready before block on %s count=%d\n",
3369 __FILE__,__LINE__, tty->driver->name, info->count );
3370
3371 spin_lock_irqsave(&info->irq_spinlock, flags);
3372 if (!tty_hung_up_p(filp)) {
3373 extra_count = 1;
3374 info->count--;
3375 }
3376 spin_unlock_irqrestore(&info->irq_spinlock, flags);
3377 info->blocked_open++;
3378
3379 while (1) {
3380 if (tty->termios->c_cflag & CBAUD) {
3381 spin_lock_irqsave(&info->irq_spinlock,flags);
3382 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
3383 usc_set_serial_signals(info);
3384 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3385 }
3386
3387 set_current_state(TASK_INTERRUPTIBLE);
3388
3389 if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)){
3390 retval = (info->flags & ASYNC_HUP_NOTIFY) ?
3391 -EAGAIN : -ERESTARTSYS;
3392 break;
3393 }
3394
3395 spin_lock_irqsave(&info->irq_spinlock,flags);
3396 usc_get_serial_signals(info);
3397 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3398
3399 if (!(info->flags & ASYNC_CLOSING) &&
3400 (do_clocal || (info->serial_signals & SerialSignal_DCD)) ) {
3401 break;
3402 }
3403
3404 if (signal_pending(current)) {
3405 retval = -ERESTARTSYS;
3406 break;
3407 }
3408
3409 if (debug_level >= DEBUG_LEVEL_INFO)
3410 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3411 __FILE__,__LINE__, tty->driver->name, info->count );
3412
3413 schedule();
3414 }
3415
3416 set_current_state(TASK_RUNNING);
3417 remove_wait_queue(&info->open_wait, &wait);
3418
3419 if (extra_count)
3420 info->count++;
3421 info->blocked_open--;
3422
3423 if (debug_level >= DEBUG_LEVEL_INFO)
3424 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
3425 __FILE__,__LINE__, tty->driver->name, info->count );
3426
3427 if (!retval)
3428 info->flags |= ASYNC_NORMAL_ACTIVE;
3429
3430 return retval;
3431
3432} /* end of block_til_ready() */
3433
3434/* mgsl_open()
3435 *
3436 * Called when a port is opened. Init and enable port.
3437 * Perform serial-specific initialization for the tty structure.
3438 *
3439 * Arguments: tty pointer to tty info structure
3440 * filp associated file pointer
3441 *
3442 * Return Value: 0 if success, otherwise error code
3443 */
3444static int mgsl_open(struct tty_struct *tty, struct file * filp)
3445{
3446 struct mgsl_struct *info;
3447 int retval, line;
3448 unsigned long page;
3449 unsigned long flags;
3450
3451 /* verify range of specified line number */
3452 line = tty->index;
3453 if ((line < 0) || (line >= mgsl_device_count)) {
3454 printk("%s(%d):mgsl_open with invalid line #%d.\n",
3455 __FILE__,__LINE__,line);
3456 return -ENODEV;
3457 }
3458
3459 /* find the info structure for the specified line */
3460 info = mgsl_device_list;
3461 while(info && info->line != line)
3462 info = info->next_device;
3463 if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
3464 return -ENODEV;
3465
3466 tty->driver_data = info;
3467 info->tty = tty;
3468
3469 if (debug_level >= DEBUG_LEVEL_INFO)
3470 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
3471 __FILE__,__LINE__,tty->driver->name, info->count);
3472
3473 /* If port is closing, signal caller to try again */
3474 if (tty_hung_up_p(filp) || info->flags & ASYNC_CLOSING){
3475 if (info->flags & ASYNC_CLOSING)
3476 interruptible_sleep_on(&info->close_wait);
3477 retval = ((info->flags & ASYNC_HUP_NOTIFY) ?
3478 -EAGAIN : -ERESTARTSYS);
3479 goto cleanup;
3480 }
3481
3482 if (!tmp_buf) {
3483 page = get_zeroed_page(GFP_KERNEL);
3484 if (!page) {
3485 retval = -ENOMEM;
3486 goto cleanup;
3487 }
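		/* a concurrent open may have installed tmp_buf while we allocated; */
		/* if so, free our page and use the existing buffer */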
3488 if (tmp_buf)
3489 free_page(page);
3490 else
3491 tmp_buf = (unsigned char *) page;
3492 }
3493
3494 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
3495
3496 spin_lock_irqsave(&info->netlock, flags);
3497 if (info->netcount) {
3498 retval = -EBUSY;
3499 spin_unlock_irqrestore(&info->netlock, flags);
3500 goto cleanup;
3501 }
3502 info->count++;
3503 spin_unlock_irqrestore(&info->netlock, flags);
3504
3505 if (info->count == 1) {
3506 /* 1st open on this device, init hardware */
3507 retval = startup(info);
3508 if (retval < 0)
3509 goto cleanup;
3510 }
3511
3512 retval = block_til_ready(tty, filp, info);
3513 if (retval) {
3514 if (debug_level >= DEBUG_LEVEL_INFO)
3515 printk("%s(%d):block_til_ready(%s) returned %d\n",
3516 __FILE__,__LINE__, info->device_name, retval);
3517 goto cleanup;
3518 }
3519
3520 if (debug_level >= DEBUG_LEVEL_INFO)
3521 printk("%s(%d):mgsl_open(%s) success\n",
3522 __FILE__,__LINE__, info->device_name);
3523 retval = 0;
3524
3525cleanup:
3526 if (retval) {
3527 if (tty->count == 1)
3528 info->tty = NULL; /* tty layer will release tty struct */
3529 if(info->count)
3530 info->count--;
3531 }
3532
3533 return retval;
3534
3535} /* end of mgsl_open() */
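
/*
 * Illustrative user space sketch of opening a port (not part of the
 * driver). It assumes a device node named /dev/ttySL0; the driver
 * registers its tty devices as "ttySL<line>". O_NONBLOCK is commonly used
 * to avoid waiting for carrier in block_til_ready(), but whether that
 * applies here depends on the flag handling earlier in that routine, so
 * treat this only as a sketch. A failed open returns -1 with errno set
 * (for example EBUSY while the port is claimed as a network device).
 *
 *	#include <fcntl.h>
 *
 *	int open_synclink_port(void)
 *	{
 *		return open("/dev/ttySL0", O_RDWR | O_NOCTTY | O_NONBLOCK);
 *	}
 */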
3536
3537/*
3538 * /proc fs routines....
3539 */
3540
3541static inline int line_info(char *buf, struct mgsl_struct *info)
3542{
3543 char stat_buf[30];
3544 int ret;
3545 unsigned long flags;
3546
3547 if (info->bus_type == MGSL_BUS_TYPE_PCI) {
3548 ret = sprintf(buf, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
3549 info->device_name, info->io_base, info->irq_level,
3550 info->phys_memory_base, info->phys_lcr_base);
3551 } else {
3552 ret = sprintf(buf, "%s:(E)ISA io:%04X irq:%d dma:%d",
3553 info->device_name, info->io_base,
3554 info->irq_level, info->dma_level);
3555 }
3556
3557 /* output current serial signal states */
3558 spin_lock_irqsave(&info->irq_spinlock,flags);
3559 usc_get_serial_signals(info);
3560 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3561
3562 stat_buf[0] = 0;
3563 stat_buf[1] = 0;
3564 if (info->serial_signals & SerialSignal_RTS)
3565 strcat(stat_buf, "|RTS");
3566 if (info->serial_signals & SerialSignal_CTS)
3567 strcat(stat_buf, "|CTS");
3568 if (info->serial_signals & SerialSignal_DTR)
3569 strcat(stat_buf, "|DTR");
3570 if (info->serial_signals & SerialSignal_DSR)
3571 strcat(stat_buf, "|DSR");
3572 if (info->serial_signals & SerialSignal_DCD)
3573 strcat(stat_buf, "|CD");
3574 if (info->serial_signals & SerialSignal_RI)
3575 strcat(stat_buf, "|RI");
3576
3577 if (info->params.mode == MGSL_MODE_HDLC ||
3578 info->params.mode == MGSL_MODE_RAW ) {
3579 ret += sprintf(buf+ret, " HDLC txok:%d rxok:%d",
3580 info->icount.txok, info->icount.rxok);
3581 if (info->icount.txunder)
3582 ret += sprintf(buf+ret, " txunder:%d", info->icount.txunder);
3583 if (info->icount.txabort)
3584 ret += sprintf(buf+ret, " txabort:%d", info->icount.txabort);
3585 if (info->icount.rxshort)
3586 ret += sprintf(buf+ret, " rxshort:%d", info->icount.rxshort);
3587 if (info->icount.rxlong)
3588 ret += sprintf(buf+ret, " rxlong:%d", info->icount.rxlong);
3589 if (info->icount.rxover)
3590 ret += sprintf(buf+ret, " rxover:%d", info->icount.rxover);
3591 if (info->icount.rxcrc)
3592 ret += sprintf(buf+ret, " rxcrc:%d", info->icount.rxcrc);
3593 } else {
3594 ret += sprintf(buf+ret, " ASYNC tx:%d rx:%d",
3595 info->icount.tx, info->icount.rx);
3596 if (info->icount.frame)
3597 ret += sprintf(buf+ret, " fe:%d", info->icount.frame);
3598 if (info->icount.parity)
3599 ret += sprintf(buf+ret, " pe:%d", info->icount.parity);
3600 if (info->icount.brk)
3601 ret += sprintf(buf+ret, " brk:%d", info->icount.brk);
3602 if (info->icount.overrun)
3603 ret += sprintf(buf+ret, " oe:%d", info->icount.overrun);
3604 }
3605
3606 /* Append serial signal status to end */
3607 ret += sprintf(buf+ret, " %s\n", stat_buf+1);
3608
3609 ret += sprintf(buf+ret, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
3610 info->tx_active,info->bh_requested,info->bh_running,
3611 info->pending_bh);
3612
3613 spin_lock_irqsave(&info->irq_spinlock,flags);
3614 {
3615 u16 Tcsr = usc_InReg( info, TCSR );
3616 u16 Tdmr = usc_InDmaReg( info, TDMR );
3617 u16 Ticr = usc_InReg( info, TICR );
3618 u16 Rscr = usc_InReg( info, RCSR );
3619 u16 Rdmr = usc_InDmaReg( info, RDMR );
3620 u16 Ricr = usc_InReg( info, RICR );
3621 u16 Icr = usc_InReg( info, ICR );
3622 u16 Dccr = usc_InReg( info, DCCR );
3623 u16 Tmr = usc_InReg( info, TMR );
3624 u16 Tccr = usc_InReg( info, TCCR );
3625 u16 Ccar = inw( info->io_base + CCAR );
3626 ret += sprintf(buf+ret, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
3627 "ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
3628 Tcsr,Tdmr,Ticr,Rscr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
3629 }
3630 spin_unlock_irqrestore(&info->irq_spinlock,flags);
3631
3632 return ret;
3633
3634} /* end of line_info() */
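
/*
 * For reference, the /proc line built above for a PCI adapter running in
 * HDLC mode has the general shape below; the values are hypothetical and
 * only illustrate the format produced by the sprintf() calls:
 *
 *	ttySL0:PCI io:E800 irq:10 mem:C8000000 lcr:C9000000 HDLC txok:5 rxok:7 RTS|DTR|CD
 *	txactive=0 bh_req=0 bh_run=0 pending_bh=0
 */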
3635
3636/* mgsl_read_proc()
3637 *
3638 * Called to print information about devices
3639 *
3640 * Arguments:
3641 * page page of memory to hold returned info
3642 *	start	where the returned data begins within page
3643 *	off	offset into the /proc file data
3644 *	count	maximum number of bytes to return
3645 *	eof	set to indicate end of available data
3646 *	data	driver specific data (unused)
3647 *
3648 * Return Value:	number of bytes of data available at *start
3649 */
3650static int mgsl_read_proc(char *page, char **start, off_t off, int count,
3651 int *eof, void *data)
3652{
3653 int len = 0, l;
3654 off_t begin = 0;
3655 struct mgsl_struct *info;
3656
3657 len += sprintf(page, "synclink driver:%s\n", driver_version);
3658
3659 info = mgsl_device_list;
3660 while( info ) {
3661 l = line_info(page + len, info);
3662 len += l;
3663 if (len+begin > off+count)
3664 goto done;
3665 if (len+begin < off) {
3666 begin += len;
3667 len = 0;
3668 }
3669 info = info->next_device;
3670 }
3671
3672 *eof = 1;
3673done:
3674 if (off >= len+begin)
3675 return 0;
3676 *start = page + (off-begin);
3677 return ((count < begin+len-off) ? count : begin+len-off);
3678
3679} /* end of mgsl_read_proc() */
3680
3681/* mgsl_allocate_dma_buffers()
3682 *
3683 * Allocate and format DMA buffers (ISA adapter)
3684 * or format shared memory buffers (PCI adapter).
3685 *
3686 * Arguments: info pointer to device instance data
3687 * Return Value: 0 if success, otherwise error
3688 */
3689static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
3690{
3691 unsigned short BuffersPerFrame;
3692
3693 info->last_mem_alloc = 0;
3694
3695 /* Calculate the number of DMA buffers necessary to hold the */
3696 /* largest allowable frame size. Note: If the max frame size is */
3697 /* not an even multiple of the DMA buffer size then we need to */
3698 /* round the buffer count per frame up one. */
3699
3700 BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
3701 if ( info->max_frame_size % DMABUFFERSIZE )
3702 BuffersPerFrame++;
3703
3704 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3705 /*
3706 * The PCI adapter has 256KBytes of shared memory to use.
3707 * This is 64 PAGE_SIZE buffers.
3708 *
3709 * The first page is used for padding at this time so the
3710 * buffer list does not begin at offset 0 of the PCI
3711 * adapter's shared memory.
3712 *
3713 * The 2nd page is used for the buffer list. A 4K buffer
3714 * list can hold 128 DMA_BUFFER structures at 32 bytes
3715 * each.
3716 *
3717 * This leaves 62 4K pages.
3718 *
3719 * The next N pages are used for transmit frame(s). We
3720 * reserve enough 4K page blocks to hold the required
3721 * number of transmit dma buffers (num_tx_dma_buffers),
3722 * each of MaxFrameSize size.
3723 *
3724 * Of the remaining pages (62-N), determine how many can
3725 * be used to receive full MaxFrameSize inbound frames
3726 */
3727 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3728 info->rx_buffer_count = 62 - info->tx_buffer_count;
3729 } else {
3730 /* Calculate the number of PAGE_SIZE buffers needed for */
3731 /* receive and transmit DMA buffers. */
3732
3733
3734 /* Calculate the number of DMA buffers necessary to */
3735 /* hold 7 max size receive frames and one max size transmit frame. */
3736 /* The receive buffer count is bumped by one so we avoid an */
3737 /* End of List condition if all receive buffers are used when */
3738 /* using linked list DMA buffers. */
3739
3740 info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
3741 info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
3742
3743 /*
3744 * limit total TxBuffers & RxBuffers to 62 4K total
3745 * (ala PCI Allocation)
3746 */
3747
3748 if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
3749 info->rx_buffer_count = 62 - info->tx_buffer_count;
3750
3751 }
3752
3753 if ( debug_level >= DEBUG_LEVEL_INFO )
3754 printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
3755 __FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
3756
3757 if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
3758 mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
3759 mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
3760 mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
3761 mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
3762 printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
3763 return -ENOMEM;
3764 }
3765
3766 mgsl_reset_rx_dma_buffers( info );
3767 mgsl_reset_tx_dma_buffers( info );
3768
3769 return 0;
3770
3771} /* end of mgsl_allocate_dma_buffers() */
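
/*
 * Worked example of the sizing above, assuming the defaults set in
 * mgsl_allocate_device() (max_frame_size = 4096, num_tx_dma_buffers = 1)
 * and a DMABUFFERSIZE of one 4K page as the comments describe:
 *
 *	BuffersPerFrame = 4096 / 4096 = 1	(no remainder, no round up)
 *
 *	PCI:	tx_buffer_count = 1 * 1 = 1
 *		rx_buffer_count = 62 - 1 = 61
 *
 *	ISA:	tx_buffer_count = 1 * 1 = 1
 *		rx_buffer_count = (1 * MAXRXFRAMES) + 6
 *			(13 if MAXRXFRAMES is 7 as the comment above states,
 *			 capped so tx + rx never exceeds 62)
 */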
3772
3773/*
3774 * mgsl_alloc_buffer_list_memory()
3775 *
3776 * Allocate a common DMA buffer for use as the
3777 * receive and transmit buffer lists.
3778 *
3779 * A buffer list is a set of buffer entries where each entry contains
3780 * a pointer to an actual buffer and a pointer to the next buffer entry
3781 * (plus some other info about the buffer).
3782 *
3783 * The buffer entries for a list are built to form a circular list so
3784 * that when the entire list has been traversed you start back at the
3785 * beginning.
3786 *
3787 * This function allocates memory for just the buffer entries.
3788 * The links (pointer to next entry) are filled in with the physical
3789 * address of the next entry so the adapter can navigate the list
3790 * using bus master DMA. The pointers to the actual buffers are filled
3791 * out later when the actual buffers are allocated.
3792 *
3793 * Arguments: info pointer to device instance data
3794 * Return Value: 0 if success, otherwise error
3795 */
3796static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
3797{
3798 unsigned int i;
3799
3800 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3801 /* PCI adapter uses shared memory. */
3802 info->buffer_list = info->memory_base + info->last_mem_alloc;
3803 info->buffer_list_phys = info->last_mem_alloc;
3804 info->last_mem_alloc += BUFFERLISTSIZE;
3805 } else {
3806 /* ISA adapter uses system memory. */
3807 /* The buffer lists are allocated as a common buffer that both */
3808 /* the processor and adapter can access. This allows the driver to */
3809 /* inspect portions of the buffer while other portions are being */
3810 /* updated by the adapter using Bus Master DMA. */
3811
3812 info->buffer_list = kmalloc(BUFFERLISTSIZE, GFP_KERNEL | GFP_DMA);
3813 if ( info->buffer_list == NULL )
3814 return -ENOMEM;
3815
3816 info->buffer_list_phys = isa_virt_to_bus(info->buffer_list);
3817 }
3818
3819 /* We got the memory for the buffer entry lists. */
3820 /* Initialize the memory block to all zeros. */
3821 memset( info->buffer_list, 0, BUFFERLISTSIZE );
3822
3823 /* Save virtual address pointers to the receive and */
3824 /* transmit buffer lists. (Receive 1st). These pointers will */
3825 /* be used by the processor to access the lists. */
3826 info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3827 info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
3828 info->tx_buffer_list += info->rx_buffer_count;
3829
3830 /*
3831 * Build the links for the buffer entry lists such that
3832 * two circular lists are built. (Transmit and Receive).
3833 *
3834 * Note: the links are physical addresses
3835 * which are read by the adapter to determine the next
3836 * buffer entry to use.
3837 */
3838
3839 for ( i = 0; i < info->rx_buffer_count; i++ ) {
3840 /* calculate and store physical address of this buffer entry */
3841 info->rx_buffer_list[i].phys_entry =
3842 info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
3843
3844 /* calculate and store physical address of */
3845			/* next entry in circular list of entries */
3846
3847 info->rx_buffer_list[i].link = info->buffer_list_phys;
3848
3849 if ( i < info->rx_buffer_count - 1 )
3850 info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3851 }
3852
3853 for ( i = 0; i < info->tx_buffer_count; i++ ) {
3854 /* calculate and store physical address of this buffer entry */
3855 info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
3856 ((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
3857
3858 /* calculate and store physical address of */
3859			/* next entry in circular list of entries */
3860
3861 info->tx_buffer_list[i].link = info->buffer_list_phys +
3862 info->rx_buffer_count * sizeof(DMABUFFERENTRY);
3863
3864 if ( i < info->tx_buffer_count - 1 )
3865 info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
3866 }
3867
3868 return 0;
3869
3870} /* end of mgsl_alloc_buffer_list_memory() */
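
/*
 * Example of the link values produced above for a hypothetical list with
 * rx_buffer_count = 3, where P = buffer_list_phys and
 * S = sizeof(DMABUFFERENTRY):
 *
 *	rx entry 0:  phys_entry = P + 0*S   link = P + 1*S
 *	rx entry 1:  phys_entry = P + 1*S   link = P + 2*S
 *	rx entry 2:  phys_entry = P + 2*S   link = P         (wraps to entry 0)
 *
 * The transmit entries follow immediately after the receive entries and
 * form a second circular list whose last link wraps back to P + 3*S
 * (the first transmit entry).
 */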
3871
3872/* Free DMA buffers allocated for use as the
3873 * receive and transmit buffer lists.
3874 * Warning:
3875 *
3876 * The data transfer buffers associated with the buffer list
3877 * MUST be freed before freeing the buffer list itself because
3878 * the buffer list contains the information necessary to free
3879 * the individual buffers!
3880 */
3881static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
3882{
3883 if ( info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI )
3884 kfree(info->buffer_list);
3885
3886 info->buffer_list = NULL;
3887 info->rx_buffer_list = NULL;
3888 info->tx_buffer_list = NULL;
3889
3890} /* end of mgsl_free_buffer_list_memory() */
3891
3892/*
3893 * mgsl_alloc_frame_memory()
3894 *
3895 * Allocate the frame DMA buffers used by the specified buffer list.
3896 * Each DMA buffer will be one memory page in size. This is necessary
3897 * because memory can fragment enough that it may be impossible
3898 * to allocate larger blocks of contiguous pages.
3899 *
3900 * Arguments:
3901 *
3902 * info pointer to device instance data
3903 * BufferList pointer to list of buffer entries
3904 * Buffercount count of buffer entries in buffer list
3905 *
3906 * Return Value: 0 if success, otherwise -ENOMEM
3907 */
3908static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
3909{
3910 int i;
3911 unsigned long phys_addr;
3912
3913 /* Allocate page sized buffers for the receive buffer list */
3914
3915 for ( i = 0; i < Buffercount; i++ ) {
3916 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
3917 /* PCI adapter uses shared memory buffers. */
3918 BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
3919 phys_addr = info->last_mem_alloc;
3920 info->last_mem_alloc += DMABUFFERSIZE;
3921 } else {
3922 /* ISA adapter uses system memory. */
3923 BufferList[i].virt_addr =
3924 kmalloc(DMABUFFERSIZE, GFP_KERNEL | GFP_DMA);
3925 if ( BufferList[i].virt_addr == NULL )
3926 return -ENOMEM;
3927 phys_addr = isa_virt_to_bus(BufferList[i].virt_addr);
3928 }
3929 BufferList[i].phys_addr = phys_addr;
3930 }
3931
3932 return 0;
3933
3934} /* end of mgsl_alloc_frame_memory() */
3935
3936/*
3937 * mgsl_free_frame_memory()
3938 *
3939 * Free the buffers associated with
3940 * each buffer entry of a buffer list.
3941 *
3942 * Arguments:
3943 *
3944 * info pointer to device instance data
3945 * BufferList pointer to list of buffer entries
3946 * Buffercount count of buffer entries in buffer list
3947 *
3948 * Return Value: None
3949 */
3950static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
3951{
3952 int i;
3953
3954 if ( BufferList ) {
3955 for ( i = 0 ; i < Buffercount ; i++ ) {
3956 if ( BufferList[i].virt_addr ) {
3957 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
3958 kfree(BufferList[i].virt_addr);
3959 BufferList[i].virt_addr = NULL;
3960 }
3961 }
3962 }
3963
3964} /* end of mgsl_free_frame_memory() */
3965
3966/* mgsl_free_dma_buffers()
3967 *
3968 * Free DMA buffers
3969 *
3970 * Arguments: info pointer to device instance data
3971 * Return Value: None
3972 */
3973static void mgsl_free_dma_buffers( struct mgsl_struct *info )
3974{
3975 mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
3976 mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
3977 mgsl_free_buffer_list_memory( info );
3978
3979} /* end of mgsl_free_dma_buffers() */
3980
3981
3982/*
3983 * mgsl_alloc_intermediate_rxbuffer_memory()
3984 *
3985 * Allocate a buffer large enough to hold max_frame_size. This buffer
3986 * is used to pass an assembled frame to the line discipline.
3987 *
3988 * Arguments:
3989 *
3990 * info pointer to device instance data
3991 *
3992 * Return Value: 0 if success, otherwise -ENOMEM
3993 */
3994static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
3995{
3996 info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
3997 if ( info->intermediate_rxbuffer == NULL )
3998 return -ENOMEM;
3999
4000 return 0;
4001
4002} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
4003
4004/*
4005 * mgsl_free_intermediate_rxbuffer_memory()
4006 *
4007 * Free the intermediate receive buffer.
4008 * Arguments:
4009 *
4010 * info pointer to device instance data
4011 *
4012 * Return Value: None
4013 */
4014static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
4015{
4016 if ( info->intermediate_rxbuffer )
4017 kfree(info->intermediate_rxbuffer);
4018
4019 info->intermediate_rxbuffer = NULL;
4020
4021} /* end of mgsl_free_intermediate_rxbuffer_memory() */
4022
4023/*
4024 * mgsl_alloc_intermediate_txbuffer_memory()
4025 *
4026 * Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
4027 * This buffer is used to load transmit frames into the adapter's dma transfer
4028 * buffers when there is sufficient space.
4029 *
4030 * Arguments:
4031 *
4032 * info pointer to device instance data
4033 *
4034 * Return Value: 0 if success, otherwise -ENOMEM
4035 */
4036static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
4037{
4038 int i;
4039
4040 if ( debug_level >= DEBUG_LEVEL_INFO )
4041 printk("%s %s(%d) allocating %d tx holding buffers\n",
4042 info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
4043
4044 memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
4045
4046 for ( i=0; i<info->num_tx_holding_buffers; ++i) {
4047 info->tx_holding_buffers[i].buffer =
4048 kmalloc(info->max_frame_size, GFP_KERNEL);
4049 if ( info->tx_holding_buffers[i].buffer == NULL )
4050 return -ENOMEM;
4051 }
4052
4053 return 0;
4054
4055} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
4056
4057/*
4058 * mgsl_free_intermediate_txbuffer_memory()
4059 *
4060 * Free the intermediate transmit holding buffers.
4061 * Arguments:
4062 *
4063 * info pointer to device instance data
4064 *
4065 * Return Value: None
4066 */
4067static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
4068{
4069 int i;
4070
4071 for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
4072 if ( info->tx_holding_buffers[i].buffer ) {
4073 kfree(info->tx_holding_buffers[i].buffer);
4074 info->tx_holding_buffers[i].buffer=NULL;
4075 }
4076 }
4077
4078 info->get_tx_holding_index = 0;
4079 info->put_tx_holding_index = 0;
4080 info->tx_holding_count = 0;
4081
4082} /* end of mgsl_free_intermediate_txbuffer_memory() */
4083
4084
4085/*
4086 * load_next_tx_holding_buffer()
4087 *
4088 * attempts to load the next buffered tx request into the
4089 * tx dma buffers
4090 *
4091 * Arguments:
4092 *
4093 * info pointer to device instance data
4094 *
4095 * Return Value: 1 if next buffered tx request loaded
4096 * into adapter's tx dma buffer,
4097 * 0 otherwise
4098 */
4099static int load_next_tx_holding_buffer(struct mgsl_struct *info)
4100{
4101 int ret = 0;
4102
4103 if ( info->tx_holding_count ) {
4104 /* determine if we have enough tx dma buffers
4105 * to accommodate the next tx frame
4106 */
4107 struct tx_holding_buffer *ptx =
4108 &info->tx_holding_buffers[info->get_tx_holding_index];
4109 int num_free = num_free_tx_dma_buffers(info);
4110 int num_needed = ptx->buffer_size / DMABUFFERSIZE;
4111 if ( ptx->buffer_size % DMABUFFERSIZE )
4112 ++num_needed;
4113
4114 if (num_needed <= num_free) {
4115 info->xmit_cnt = ptx->buffer_size;
4116 mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
4117
4118 --info->tx_holding_count;
4119 if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
4120 info->get_tx_holding_index=0;
4121
4122 /* restart transmit timer */
4123 mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
4124
4125 ret = 1;
4126 }
4127 }
4128
4129 return ret;
4130}
4131
4132/*
4133 * save_tx_buffer_request()
4134 *
4135 * attempt to store transmit frame request for later transmission
4136 *
4137 * Arguments:
4138 *
4139 * info pointer to device instance data
4140 * Buffer pointer to buffer containing frame to load
4141 * BufferSize size in bytes of frame in Buffer
4142 *
4143 * Return Value: 1 if able to store, 0 otherwise
4144 */
4145static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
4146{
4147 struct tx_holding_buffer *ptx;
4148
4149 if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
4150 return 0; /* all buffers in use */
4151 }
4152
4153 ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
4154 ptx->buffer_size = BufferSize;
4155 memcpy( ptx->buffer, Buffer, BufferSize);
4156
4157 ++info->tx_holding_count;
4158 if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
4159 info->put_tx_holding_index=0;
4160
4161 return 1;
4162}
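
/*
 * save_tx_buffer_request() and load_next_tx_holding_buffer() together
 * implement a circular queue of pending transmit frames. For a
 * hypothetical num_tx_holding_buffers of 3:
 *
 *	save -> put_tx_holding_index 0->1, tx_holding_count 1
 *	save -> put_tx_holding_index 1->2, tx_holding_count 2
 *	save -> put_tx_holding_index 2->0, tx_holding_count 3  (index wraps)
 *	save -> returns 0, all holding buffers in use
 *	load -> get_tx_holding_index 0->1, tx_holding_count 2
 */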
4163
4164static int mgsl_claim_resources(struct mgsl_struct *info)
4165{
4166 if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
4167 printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
4168 __FILE__,__LINE__,info->device_name, info->io_base);
4169 return -ENODEV;
4170 }
4171 info->io_addr_requested = 1;
4172
4173 if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
4174 info->device_name, info ) < 0 ) {
4175 printk( "%s(%d):Cant request interrupt on device %s IRQ=%d\n",
4176 __FILE__,__LINE__,info->device_name, info->irq_level );
4177 goto errout;
4178 }
4179 info->irq_requested = 1;
4180
4181 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4182 if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
4183 printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
4184 __FILE__,__LINE__,info->device_name, info->phys_memory_base);
4185 goto errout;
4186 }
4187 info->shared_mem_requested = 1;
4188 if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
4189 printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
4190 __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
4191 goto errout;
4192 }
4193 info->lcr_mem_requested = 1;
4194
4195 info->memory_base = ioremap(info->phys_memory_base,0x40000);
4196 if (!info->memory_base) {
4197 printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
4198 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4199 goto errout;
4200 }
4201
4202 if ( !mgsl_memory_test(info) ) {
4203 printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
4204 __FILE__,__LINE__,info->device_name, info->phys_memory_base );
4205 goto errout;
4206 }
4207
4208 info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset;
4209 if (!info->lcr_base) {
4210 printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
4211 __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
4212 goto errout;
4213 }
4214
4215 } else {
4216 /* claim DMA channel */
4217
4218 if (request_dma(info->dma_level,info->device_name) < 0){
4219 printk( "%s(%d):Cant request DMA channel on device %s DMA=%d\n",
4220 __FILE__,__LINE__,info->device_name, info->dma_level );
4221 mgsl_release_resources( info );
4222 return -ENODEV;
4223 }
4224 info->dma_requested = 1;
4225
4226 /* ISA adapter uses bus master DMA */
4227 set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
4228 enable_dma(info->dma_level);
4229 }
4230
4231 if ( mgsl_allocate_dma_buffers(info) < 0 ) {
4232 printk( "%s(%d):Cant allocate DMA buffers on device %s DMA=%d\n",
4233 __FILE__,__LINE__,info->device_name, info->dma_level );
4234 goto errout;
4235 }
4236
4237 return 0;
4238errout:
4239 mgsl_release_resources(info);
4240 return -ENODEV;
4241
4242} /* end of mgsl_claim_resources() */
4243
4244static void mgsl_release_resources(struct mgsl_struct *info)
4245{
4246 if ( debug_level >= DEBUG_LEVEL_INFO )
4247 printk( "%s(%d):mgsl_release_resources(%s) entry\n",
4248 __FILE__,__LINE__,info->device_name );
4249
4250 if ( info->irq_requested ) {
4251 free_irq(info->irq_level, info);
4252 info->irq_requested = 0;
4253 }
4254 if ( info->dma_requested ) {
4255 disable_dma(info->dma_level);
4256 free_dma(info->dma_level);
4257 info->dma_requested = 0;
4258 }
4259 mgsl_free_dma_buffers(info);
4260 mgsl_free_intermediate_rxbuffer_memory(info);
4261 mgsl_free_intermediate_txbuffer_memory(info);
4262
4263 if ( info->io_addr_requested ) {
4264 release_region(info->io_base,info->io_addr_size);
4265 info->io_addr_requested = 0;
4266 }
4267 if ( info->shared_mem_requested ) {
4268 release_mem_region(info->phys_memory_base,0x40000);
4269 info->shared_mem_requested = 0;
4270 }
4271 if ( info->lcr_mem_requested ) {
4272 release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
4273 info->lcr_mem_requested = 0;
4274 }
4275 if (info->memory_base){
4276 iounmap(info->memory_base);
4277 info->memory_base = NULL;
4278 }
4279 if (info->lcr_base){
4280 iounmap(info->lcr_base - info->lcr_offset);
4281 info->lcr_base = NULL;
4282 }
4283
4284 if ( debug_level >= DEBUG_LEVEL_INFO )
4285 printk( "%s(%d):mgsl_release_resources(%s) exit\n",
4286 __FILE__,__LINE__,info->device_name );
4287
4288} /* end of mgsl_release_resources() */
4289
4290/* mgsl_add_device()
4291 *
4292 * Add the specified device instance data structure to the
4293 * global linked list of devices and increment the device count.
4294 *
4295 * Arguments: info pointer to device instance data
4296 * Return Value: None
4297 */
4298static void mgsl_add_device( struct mgsl_struct *info )
4299{
4300 info->next_device = NULL;
4301 info->line = mgsl_device_count;
4302 sprintf(info->device_name,"ttySL%d",info->line);
4303
4304 if (info->line < MAX_TOTAL_DEVICES) {
4305 if (maxframe[info->line])
4306 info->max_frame_size = maxframe[info->line];
4307 info->dosyncppp = dosyncppp[info->line];
4308
4309 if (txdmabufs[info->line]) {
4310 info->num_tx_dma_buffers = txdmabufs[info->line];
4311 if (info->num_tx_dma_buffers < 1)
4312 info->num_tx_dma_buffers = 1;
4313 }
4314
4315 if (txholdbufs[info->line]) {
4316 info->num_tx_holding_buffers = txholdbufs[info->line];
4317 if (info->num_tx_holding_buffers < 1)
4318 info->num_tx_holding_buffers = 1;
4319 else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
4320 info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
4321 }
4322 }
4323
4324 mgsl_device_count++;
4325
4326 if ( !mgsl_device_list )
4327 mgsl_device_list = info;
4328 else {
4329 struct mgsl_struct *current_dev = mgsl_device_list;
4330 while( current_dev->next_device )
4331 current_dev = current_dev->next_device;
4332 current_dev->next_device = info;
4333 }
4334
4335 if ( info->max_frame_size < 4096 )
4336 info->max_frame_size = 4096;
4337 else if ( info->max_frame_size > 65535 )
4338 info->max_frame_size = 65535;
4339
4340 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
4341 printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
4342 info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
4343 info->phys_memory_base, info->phys_lcr_base,
4344 info->max_frame_size );
4345 } else {
4346 printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
4347 info->device_name, info->io_base, info->irq_level, info->dma_level,
4348 info->max_frame_size );
4349 }
4350
4351#ifdef CONFIG_HDLC
4352 hdlcdev_init(info);
4353#endif
4354
4355} /* end of mgsl_add_device() */
4356
4357/* mgsl_allocate_device()
4358 *
4359 * Allocate and initialize a device instance structure
4360 *
4361 * Arguments: none
4362 * Return Value: pointer to mgsl_struct if success, otherwise NULL
4363 */
4364static struct mgsl_struct* mgsl_allocate_device(void)
4365{
4366 struct mgsl_struct *info;
4367
4368 info = (struct mgsl_struct *)kmalloc(sizeof(struct mgsl_struct),
4369 GFP_KERNEL);
4370
4371 if (!info) {
4372 printk("Error can't allocate device instance data\n");
4373 } else {
4374 memset(info, 0, sizeof(struct mgsl_struct));
4375 info->magic = MGSL_MAGIC;
4376 INIT_WORK(&info->task, mgsl_bh_handler, info);
4377 info->max_frame_size = 4096;
4378 info->close_delay = 5*HZ/10;
4379 info->closing_wait = 30*HZ;
4380 init_waitqueue_head(&info->open_wait);
4381 init_waitqueue_head(&info->close_wait);
4382 init_waitqueue_head(&info->status_event_wait_q);
4383 init_waitqueue_head(&info->event_wait_q);
4384 spin_lock_init(&info->irq_spinlock);
4385 spin_lock_init(&info->netlock);
4386 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
4387 info->idle_mode = HDLC_TXIDLE_FLAGS;
4388 info->num_tx_dma_buffers = 1;
4389 info->num_tx_holding_buffers = 0;
4390 }
4391
4392 return info;
4393
4394} /* end of mgsl_allocate_device()*/
4395
4396static struct tty_operations mgsl_ops = {
4397 .open = mgsl_open,
4398 .close = mgsl_close,
4399 .write = mgsl_write,
4400 .put_char = mgsl_put_char,
4401 .flush_chars = mgsl_flush_chars,
4402 .write_room = mgsl_write_room,
4403 .chars_in_buffer = mgsl_chars_in_buffer,
4404 .flush_buffer = mgsl_flush_buffer,
4405 .ioctl = mgsl_ioctl,
4406 .throttle = mgsl_throttle,
4407 .unthrottle = mgsl_unthrottle,
4408 .send_xchar = mgsl_send_xchar,
4409 .break_ctl = mgsl_break,
4410 .wait_until_sent = mgsl_wait_until_sent,
4411 .read_proc = mgsl_read_proc,
4412 .set_termios = mgsl_set_termios,
4413 .stop = mgsl_stop,
4414 .start = mgsl_start,
4415 .hangup = mgsl_hangup,
4416 .tiocmget = tiocmget,
4417 .tiocmset = tiocmset,
4418};
4419
4420/*
4421 * perform tty device initialization
4422 */
4423static int mgsl_init_tty(void)
4424{
4425 int rc;
4426
4427 serial_driver = alloc_tty_driver(128);
4428 if (!serial_driver)
4429 return -ENOMEM;
4430
4431 serial_driver->owner = THIS_MODULE;
4432 serial_driver->driver_name = "synclink";
4433 serial_driver->name = "ttySL";
4434 serial_driver->major = ttymajor;
4435 serial_driver->minor_start = 64;
4436 serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
4437 serial_driver->subtype = SERIAL_TYPE_NORMAL;
4438 serial_driver->init_termios = tty_std_termios;
4439 serial_driver->init_termios.c_cflag =
4440 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
4441 serial_driver->flags = TTY_DRIVER_REAL_RAW;
4442 tty_set_operations(serial_driver, &mgsl_ops);
4443 if ((rc = tty_register_driver(serial_driver)) < 0) {
4444 printk("%s(%d):Couldn't register serial driver\n",
4445 __FILE__,__LINE__);
4446 put_tty_driver(serial_driver);
4447 serial_driver = NULL;
4448 return rc;
4449 }
4450
4451 printk("%s %s, tty major#%d\n",
4452 driver_name, driver_version,
4453 serial_driver->major);
4454 return 0;
4455}
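
/*
 * With the settings above the first port is minor 64 of the registered
 * major number (device name "ttySL"), so if device nodes are created by
 * hand rather than by udev/devfs the node for port 0 would look like:
 *
 *	mknod /dev/ttySL0 c <major> 64
 *
 * where <major> is the tty major number printed at registration time.
 */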
4456
4457/* enumerate user specified ISA adapters
4458 */
4459static void mgsl_enum_isa_devices(void)
4460{
4461 struct mgsl_struct *info;
4462 int i;
4463
4464 /* Check for user specified ISA devices */
4465
4466 for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
4467 if ( debug_level >= DEBUG_LEVEL_INFO )
4468 printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
4469 io[i], irq[i], dma[i] );
4470
4471 info = mgsl_allocate_device();
4472 if ( !info ) {
4473 /* error allocating device instance data */
4474 if ( debug_level >= DEBUG_LEVEL_ERROR )
4475 printk( "can't allocate device instance data.\n");
4476 continue;
4477 }
4478
4479 /* Copy user configuration info to device instance data */
4480 info->io_base = (unsigned int)io[i];
4481 info->irq_level = (unsigned int)irq[i];
4482 info->irq_level = irq_canonicalize(info->irq_level);
4483 info->dma_level = (unsigned int)dma[i];
4484 info->bus_type = MGSL_BUS_TYPE_ISA;
4485 info->io_addr_size = 16;
4486 info->irq_flags = 0;
4487
4488 mgsl_add_device( info );
4489 }
4490}
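
/*
 * Hypothetical example of configuring one ISA adapter, assuming the
 * io/irq/dma/maxframe arrays referenced above are exposed as module
 * parameters of the same names (one comma separated entry per adapter):
 *
 *	modprobe synclink io=0x280 irq=10 dma=7 maxframe=4096
 */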
4491
4492static void synclink_cleanup(void)
4493{
4494 int rc;
4495 struct mgsl_struct *info;
4496 struct mgsl_struct *tmp;
4497
4498 printk("Unloading %s: %s\n", driver_name, driver_version);
4499
4500 if (serial_driver) {
4501 if ((rc = tty_unregister_driver(serial_driver)))
4502 printk("%s(%d) failed to unregister tty driver err=%d\n",
4503 __FILE__,__LINE__,rc);
4504 put_tty_driver(serial_driver);
4505 }
4506
4507 info = mgsl_device_list;
4508 while(info) {
4509#ifdef CONFIG_HDLC
4510 hdlcdev_exit(info);
4511#endif
4512 mgsl_release_resources(info);
4513 tmp = info;
4514 info = info->next_device;
4515 kfree(tmp);
4516 }
4517
4518 if (tmp_buf) {
4519 free_page((unsigned long) tmp_buf);
4520 tmp_buf = NULL;
4521 }
4522
4523 if (pci_registered)
4524 pci_unregister_driver(&synclink_pci_driver);
4525}
4526
4527static int __init synclink_init(void)
4528{
4529 int rc;
4530
4531 if (break_on_load) {
4532 mgsl_get_text_ptr();
4533 BREAKPOINT();
4534 }
4535
4536 printk("%s %s\n", driver_name, driver_version);
4537
4538 mgsl_enum_isa_devices();
4539 if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
4540 printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
4541 else
4542 pci_registered = 1;
4543
4544 if ((rc = mgsl_init_tty()) < 0)
4545 goto error;
4546
4547 return 0;
4548
4549error:
4550 synclink_cleanup();
4551 return rc;
4552}
4553
4554static void __exit synclink_exit(void)
4555{
4556 synclink_cleanup();
4557}
4558
4559module_init(synclink_init);
4560module_exit(synclink_exit);
4561
4562/*
4563 * usc_RTCmd()
4564 *
4565 * Issue a USC Receive/Transmit command to the
4566 * Channel Command/Address Register (CCAR).
4567 *
4568 * Notes:
4569 *
4570 * The command is encoded in the most significant 5 bits <15..11>
4571 * of the CCAR value. Bits <10..7> of the CCAR must be preserved
4572 * and Bits <6..0> must be written as zeros.
4573 *
4574 * Arguments:
4575 *
4576 * info pointer to device information structure
4577 * Cmd command mask (use symbolic macros)
4578 *
4579 * Return Value:
4580 *
4581 * None
4582 */
4583static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
4584{
4585 /* output command to CCAR in bits <15..11> */
4586 /* preserve bits <10..7>, bits <6..0> must be zero */
4587
4588 outw( Cmd + info->loopback_bits, info->io_base + CCAR );
4589
4590 /* Read to flush write to CCAR */
4591 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4592 inw( info->io_base + CCAR );
4593
4594} /* end of usc_RTCmd() */
4595
4596/*
4597 * usc_DmaCmd()
4598 *
4599 * Issue a DMA command to the DMA Command/Address Register (DCAR).
4600 *
4601 * Arguments:
4602 *
4603 * info pointer to device information structure
4604 * Cmd DMA command mask (usc_DmaCmd_XX Macros)
4605 *
4606 * Return Value:
4607 *
4608 * None
4609 */
4610static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
4611{
4612 /* write command mask to DCAR */
4613 outw( Cmd + info->mbre_bit, info->io_base );
4614
4615 /* Read to flush write to DCAR */
4616 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4617 inw( info->io_base );
4618
4619} /* end of usc_DmaCmd() */
4620
4621/*
4622 * usc_OutDmaReg()
4623 *
4624 * Write a 16-bit value to a USC DMA register
4625 *
4626 * Arguments:
4627 *
4628 * info pointer to device info structure
4629 * RegAddr register address (number) for write
4630 * RegValue 16-bit value to write to register
4631 *
4632 * Return Value:
4633 *
4634 * None
4635 *
4636 */
4637static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4638{
4639 /* Note: The DCAR is located at the adapter base address */
4640 /* Note: must preserve state of BIT8 in DCAR */
4641
4642 outw( RegAddr + info->mbre_bit, info->io_base );
4643 outw( RegValue, info->io_base );
4644
4645 /* Read to flush write to DCAR */
4646 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4647 inw( info->io_base );
4648
4649} /* end of usc_OutDmaReg() */
4650
4651/*
4652 * usc_InDmaReg()
4653 *
4654 * Read a 16-bit value from a DMA register
4655 *
4656 * Arguments:
4657 *
4658 * info pointer to device info structure
4659 * RegAddr register address (number) to read from
4660 *
4661 * Return Value:
4662 *
4663 * The 16-bit value read from register
4664 *
4665 */
4666static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
4667{
4668 /* Note: The DCAR is located at the adapter base address */
4669 /* Note: must preserve state of BIT8 in DCAR */
4670
4671 outw( RegAddr + info->mbre_bit, info->io_base );
4672 return inw( info->io_base );
4673
4674} /* end of usc_InDmaReg() */
4675
4676/*
4677 *
4678 * usc_OutReg()
4679 *
4680 * Write a 16-bit value to a USC serial channel register
4681 *
4682 * Arguments:
4683 *
4684 * info pointer to device info structure
4685 * RegAddr register address (number) to write to
4686 * RegValue 16-bit value to write to register
4687 *
4688 * Return Value:
4689 *
4690 * None
4691 *
4692 */
4693static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
4694{
4695 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4696 outw( RegValue, info->io_base + CCAR );
4697
4698 /* Read to flush write to CCAR */
4699 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4700 inw( info->io_base + CCAR );
4701
4702} /* end of usc_OutReg() */
4703
4704/*
4705 * usc_InReg()
4706 *
4707 * Reads a 16-bit value from a USC serial channel register
4708 *
4709 * Arguments:
4710 *
4711 * info pointer to device extension
4712 * RegAddr register address (number) to read from
4713 *
4714 * Return Value:
4715 *
4716 * 16-bit value read from register
4717 */
4718static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
4719{
4720 outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
4721 return inw( info->io_base + CCAR );
4722
4723} /* end of usc_InReg() */
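
/*
 * usc_InReg() and usc_OutReg() are commonly paired to perform a
 * read-modify-write of a serial channel register, as done throughout
 * this driver, for example:
 *
 *	usc_OutReg(info, SICR, (u16)(usc_InReg(info, SICR) | BIT3));
 *
 * Both accesses go through the CCAR (register address first, then data),
 * which is why callers such as line_info() take irq_spinlock around
 * these register accesses.
 */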
4724
4725/* usc_set_sdlc_mode()
4726 *
4727 * Set up the adapter for SDLC DMA communications.
4728 *
4729 * Arguments: info pointer to device instance data
4730 * Return Value: NONE
4731 */
4732static void usc_set_sdlc_mode( struct mgsl_struct *info )
4733{
4734 u16 RegValue;
4735 int PreSL1660;
4736
4737 /*
4738 * determine if the IUSC on the adapter is pre-SL1660. If
4739 * not, take advantage of the UnderWait feature of more
4740 * modern chips. If an underrun occurs and this bit is set,
4741 * the transmitter will idle the programmed idle pattern
4742 * until the driver has time to service the underrun. Otherwise,
4743 * the dma controller may get the cycles previously requested
4744 * and begin transmitting queued tx data.
4745 */
4746 usc_OutReg(info,TMCR,0x1f);
4747 RegValue=usc_InReg(info,TMDR);
4748 if ( RegValue == IUSC_PRE_SL1660 )
4749 PreSL1660 = 1;
4750 else
4751 PreSL1660 = 0;
4752
4753
4754 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
4755 {
4756 /*
4757 ** Channel Mode Register (CMR)
4758 **
4759 ** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
4760 ** <13> 0 0 = Transmit Disabled (initially)
4761 ** <12> 0 1 = Consecutive Idles share common 0
4762 ** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
4763 ** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
4764 ** <3..0> 0110 Receiver Mode = HDLC/SDLC
4765 **
4766 ** 1000 1110 0000 0110 = 0x8e06
4767 */
4768 RegValue = 0x8e06;
4769
4770 /*--------------------------------------------------
4771 * ignore user options for UnderRun Actions and
4772 * preambles
4773 *--------------------------------------------------*/
4774 }
4775 else
4776 {
4777 /* Channel mode Register (CMR)
4778 *
4779 * <15..14> 00 Tx Sub modes, Underrun Action
4780 * <13> 0 1 = Send Preamble before opening flag
4781 * <12> 0 1 = Consecutive Idles share common 0
4782 * <11..8> 0110 Transmitter mode = HDLC/SDLC
4783 * <7..4> 0000 Rx Sub modes, addr/ctrl field handling
4784 * <3..0> 0110 Receiver mode = HDLC/SDLC
4785 *
4786 * 0000 0110 0000 0110 = 0x0606
4787 */
4788 if (info->params.mode == MGSL_MODE_RAW) {
4789 RegValue = 0x0001; /* Set Receive mode = external sync */
4790
4791 usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
4792 (unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
4793
4794 /*
4795 * TxSubMode:
4796 * CMR <15> 0 Don't send CRC on Tx Underrun
4797 * CMR <14> x undefined
4798			 * CMR <13>  0  Send preamble before opening sync
4799 * CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
4800 *
4801 * TxMode:
4802			 * TxMode:
4803			 * CMR <11..8> 0100 MonoSync
4804			 *
4805			 *	0x00 0100 xxxx xxxx = 0x04xx
4805 */
4806 RegValue |= 0x0400;
4807 }
4808 else {
4809
4810 RegValue = 0x0606;
4811
4812 if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
4813 RegValue |= BIT14;
4814 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
4815 RegValue |= BIT15;
4816 else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
4817 RegValue |= BIT15 + BIT14;
4818 }
4819
4820 if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
4821 RegValue |= BIT13;
4822 }
4823
4824 if ( info->params.mode == MGSL_MODE_HDLC &&
4825 (info->params.flags & HDLC_FLAG_SHARE_ZERO) )
4826 RegValue |= BIT12;
4827
4828 if ( info->params.addr_filter != 0xff )
4829 {
4830 /* set up receive address filtering */
4831 usc_OutReg( info, RSR, info->params.addr_filter );
4832 RegValue |= BIT4;
4833 }
4834
4835 usc_OutReg( info, CMR, RegValue );
4836 info->cmr_value = RegValue;
4837
4838 /* Receiver mode Register (RMR)
4839 *
4840 * <15..13> 000 encoding
4841 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4842 * <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
4843 * <9> 0 1 = Include Receive chars in CRC
4844 * <8> 1 1 = Use Abort/PE bit as abort indicator
4845 * <7..6> 00 Even parity
4846 * <5> 0 parity disabled
4847 * <4..2> 000 Receive Char Length = 8 bits
4848 * <1..0> 00 Disable Receiver
4849 *
4850 * 0000 0101 0000 0000 = 0x0500
4851 */
4852
4853 RegValue = 0x0500;
4854
4855 switch ( info->params.encoding ) {
4856 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4857 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4858 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4859 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4860 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4861 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4862 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4863 }
4864
4865 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4866 RegValue |= BIT9;
4867 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4868 RegValue |= ( BIT12 | BIT10 | BIT9 );
4869
4870 usc_OutReg( info, RMR, RegValue );
4871
4872 /* Set the Receive count Limit Register (RCLR) to 0xffff. */
4873 /* When an opening flag of an SDLC frame is recognized the */
4874 /* Receive Character count (RCC) is loaded with the value in */
4875 /* RCLR. The RCC is decremented for each received byte. The */
4876 /* value of RCC is stored after the closing flag of the frame */
4877 /* allowing the frame size to be computed. */
4878
4879 usc_OutReg( info, RCLR, RCLRVALUE );
4880
4881 usc_RCmd( info, RCmd_SelectRicrdma_level );
4882
4883 /* Receive Interrupt Control Register (RICR)
4884 *
4885 * <15..8> ? RxFIFO DMA Request Level
4886 * <7> 0 Exited Hunt IA (Interrupt Arm)
4887 * <6> 0 Idle Received IA
4888 * <5> 0 Break/Abort IA
4889 * <4> 0 Rx Bound IA
4890 * <3> 1 Queued status reflects oldest 2 bytes in FIFO
4891 * <2> 0 Abort/PE IA
4892 * <1> 1 Rx Overrun IA
4893 * <0> 0 Select TC0 value for readback
4894 *
4895	 * 0000 0000 0000 1010 = 0x000a
4896 */
4897
4898 /* Carry over the Exit Hunt and Idle Received bits */
4899 /* in case they have been armed by usc_ArmEvents. */
4900
4901 RegValue = usc_InReg( info, RICR ) & 0xc0;
4902
4903 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4904 usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
4905 else
4906 usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
4907
4908 /* Unlatch all Rx status bits and clear Rx status IRQ Pending */
4909
4910 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
4911 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
4912
4913 /* Transmit mode Register (TMR)
4914 *
4915 * <15..13> 000 encoding
4916 * <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
4917 * <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
4918 * <9> 0 1 = Tx CRC Enabled
4919 * <8> 0 1 = Append CRC to end of transmit frame
4920 * <7..6> 00 Transmit parity Even
4921 * <5> 0 Transmit parity Disabled
4922 * <4..2> 000 Tx Char Length = 8 bits
4923 * <1..0> 00 Disable Transmitter
4924 *
4925 * 0000 0100 0000 0000 = 0x0400
4926 */
4927
4928 RegValue = 0x0400;
4929
4930 switch ( info->params.encoding ) {
4931 case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
4932 case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
4933 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
4934 case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
4935 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
4936 case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
4937 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
4938 }
4939
4940 if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
4941 RegValue |= BIT9 + BIT8;
4942 else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
4943 RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
4944
4945 usc_OutReg( info, TMR, RegValue );
4946
4947 usc_set_txidle( info );
4948
4949
4950 usc_TCmd( info, TCmd_SelectTicrdma_level );
4951
4952 /* Transmit Interrupt Control Register (TICR)
4953 *
4954 * <15..8> ? Transmit FIFO DMA Level
4955 * <7> 0 Present IA (Interrupt Arm)
4956 * <6> 0 Idle Sent IA
4957 * <5> 1 Abort Sent IA
4958 * <4> 1 EOF/EOM Sent IA
4959 * <3> 0 CRC Sent IA
4960 * <2> 1 1 = Wait for SW Trigger to Start Frame
4961 * <1> 1 Tx Underrun IA
4962 * <0> 0 TC0 constant on read back
4963 *
4964 * 0000 0000 0011 0110 = 0x0036
4965 */
4966
4967 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
4968 usc_OutReg( info, TICR, 0x0736 );
4969 else
4970 usc_OutReg( info, TICR, 0x1436 );
4971
4972 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
4973 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
4974
4975 /*
4976 ** Transmit Command/Status Register (TCSR)
4977 **
4978 ** <15..12> 0000 TCmd
4979 ** <11> 0/1 UnderWait
4980 ** <10..08> 000 TxIdle
4981 ** <7> x PreSent
4982 ** <6> x IdleSent
4983 ** <5> x AbortSent
4984 ** <4> x EOF/EOM Sent
4985 ** <3> x CRC Sent
4986 ** <2> x All Sent
4987 ** <1> x TxUnder
4988 ** <0> x TxEmpty
4989 **
4990 ** 0000 0000 0000 0000 = 0x0000
4991 */
4992 info->tcsr_value = 0;
4993
4994 if ( !PreSL1660 )
4995 info->tcsr_value |= TCSR_UNDERWAIT;
4996
4997 usc_OutReg( info, TCSR, info->tcsr_value );
4998
4999 /* Clock mode Control Register (CMCR)
5000 *
5001 * <15..14> 00 counter 1 Source = Disabled
5002 * <13..12> 00 counter 0 Source = Disabled
5003 * <11..10> 11 BRG1 Input is TxC Pin
5004 * <9..8> 11 BRG0 Input is TxC Pin
5005 * <7..6> 01 DPLL Input is BRG1 Output
5006 * <5..3> XXX TxCLK comes from Port 0
5007 * <2..0> XXX RxCLK comes from Port 1
5008 *
5009 * 0000 1111 0111 0111 = 0x0f77
5010 */
5011
5012 RegValue = 0x0f40;
5013
5014 if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
5015 RegValue |= 0x0003; /* RxCLK from DPLL */
5016 else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
5017 RegValue |= 0x0004; /* RxCLK from BRG0 */
5018 else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
5019 RegValue |= 0x0006; /* RxCLK from TXC Input */
5020 else
5021 RegValue |= 0x0007; /* RxCLK from Port1 */
5022
5023 if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
5024 RegValue |= 0x0018; /* TxCLK from DPLL */
5025 else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
5026 RegValue |= 0x0020; /* TxCLK from BRG0 */
5027 else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
5028		RegValue |= 0x0038;	/* TxCLK from RxC Input */
5029 else
5030 RegValue |= 0x0030; /* TxCLK from Port0 */
5031
5032 usc_OutReg( info, CMCR, RegValue );
5033
5034
5035 /* Hardware Configuration Register (HCR)
5036 *
5037 * <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
5038 * <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
5039 * <12> 0 CVOK:0=report code violation in biphase
5040 * <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
5041 * <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
5042 * <7..6> 00 reserved
5043 * <5> 0 BRG1 mode:0=continuous,1=single cycle
5044 * <4> X BRG1 Enable
5045 * <3..2> 00 reserved
5046 * <1> 0 BRG0 mode:0=continuous,1=single cycle
5047 * <0> 0 BRG0 Enable
5048 */
5049
5050 RegValue = 0x0000;
5051
5052 if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
5053 u32 XtalSpeed;
5054 u32 DpllDivisor;
5055 u16 Tc;
5056
5057 /* DPLL is enabled. Use BRG1 to provide continuous reference clock */
5058 /* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
5059
5060 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5061 XtalSpeed = 11059200;
5062 else
5063 XtalSpeed = 14745600;
5064
5065 if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
5066 DpllDivisor = 16;
5067 RegValue |= BIT10;
5068 }
5069 else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
5070 DpllDivisor = 8;
5071 RegValue |= BIT11;
5072 }
5073 else
5074 DpllDivisor = 32;
5075
5076 /* Tc = (Xtal/Speed) - 1 */
5077 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5078 /* then rounding up gives a more precise time constant. Instead */
5079 /* of rounding up and then subtracting 1 we just don't subtract */
5080 /* the one in this case. */
5081
5082 /*--------------------------------------------------
5083 * ejz: for DPLL mode, application should use the
5084 * same clock speed as the partner system, even
5085 * though clocking is derived from the input RxData.
5086 * In case the user uses a 0 for the clock speed,
5087		 * default the time constant to 0xffff and don't try to divide by
5088 * zero
5089 *--------------------------------------------------*/
5090 if ( info->params.clock_speed )
5091 {
5092 Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
5093 if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
5094 / info->params.clock_speed) )
5095 Tc--;
5096 }
5097 else
5098 Tc = -1;
5099
5100
5101 /* Write 16-bit Time Constant for BRG1 */
5102 usc_OutReg( info, TC1R, Tc );
5103
5104 RegValue |= BIT4; /* enable BRG1 */
5105
5106 switch ( info->params.encoding ) {
5107 case HDLC_ENCODING_NRZ:
5108 case HDLC_ENCODING_NRZB:
5109 case HDLC_ENCODING_NRZI_MARK:
5110 case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
5111 case HDLC_ENCODING_BIPHASE_MARK:
5112 case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
5113 case HDLC_ENCODING_BIPHASE_LEVEL:
5114 case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
5115 }
5116 }
5117
5118 usc_OutReg( info, HCR, RegValue );
5119
5120
5121 /* Channel Control/status Register (CCSR)
5122 *
5123 * <15> X RCC FIFO Overflow status (RO)
5124 * <14> X RCC FIFO Not Empty status (RO)
5125 * <13> 0 1 = Clear RCC FIFO (WO)
5126 * <12> X DPLL Sync (RW)
5127 * <11> X DPLL 2 Missed Clocks status (RO)
5128 * <10> X DPLL 1 Missed Clock status (RO)
5129 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
5130 * <7> X SDLC Loop On status (RO)
5131 * <6> X SDLC Loop Send status (RO)
5132 * <5> 1 Bypass counters for TxClk and RxClk (RW)
5133 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
5134 * <1..0> 00 reserved
5135 *
5136 * 0000 0000 0010 0000 = 0x0020
5137 */
5138
5139 usc_OutReg( info, CCSR, 0x1020 );
5140
5141
5142 if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
5143 usc_OutReg( info, SICR,
5144 (u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
5145 }
5146
5147
5148 /* enable Master Interrupt Enable bit (MIE) */
5149 usc_EnableMasterIrqBit( info );
5150
5151 usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
5152 TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
5153
5154 /* arm RCC underflow interrupt */
5155 usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
5156 usc_EnableInterrupts(info, MISC);
5157
5158 info->mbre_bit = 0;
5159 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5160 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5161 info->mbre_bit = BIT8;
5162 outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
5163
5164 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
5165 /* Enable DMAEN (Port 7, Bit 14) */
5166 /* This connects the DMA request signal to the ISA bus */
5167 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
5168 }
5169
5170 /* DMA Control Register (DCR)
5171 *
5172 * <15..14> 10 Priority mode = Alternating Tx/Rx
5173 * 01 Rx has priority
5174 * 00 Tx has priority
5175 *
5176 * <13> 1 Enable Priority Preempt per DCR<15..14>
5177 * (WARNING DCR<11..10> must be 00 when this is 1)
5178 * 0 Choose activate channel per DCR<11..10>
5179 *
5180 * <12> 0 Little Endian for Array/List
5181 * <11..10> 00 Both Channels can use each bus grant
5182 * <9..6> 0000 reserved
5183 * <5> 0 7 CLK - Minimum Bus Re-request Interval
5184 * <4> 0 1 = drive D/C and S/D pins
5185 * <3> 1 1 = Add one wait state to all DMA cycles.
5186 * <2> 0 1 = Strobe /UAS on every transfer.
5187 * <1..0> 11 Addr incrementing only affects LS24 bits
5188 *
5189	 *	1010 0000 0000 1011 = 0xa00b
5190 */
5191
5192 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5193 /* PCI adapter does not need DMA wait state */
5194 usc_OutDmaReg( info, DCR, 0xa00b );
5195 }
5196 else
5197 usc_OutDmaReg( info, DCR, 0x800b );
5198
5199
5200 /* Receive DMA mode Register (RDMR)
5201 *
5202 * <15..14> 11 DMA mode = Linked List Buffer mode
5203	 * <13>       1	RSBinA/L = store Rx status Block in Array/List entry
5204 * <12> 1 Clear count of List Entry after fetching
5205 * <11..10> 00 Address mode = Increment
5206 * <9> 1 Terminate Buffer on RxBound
5207 * <8> 0 Bus Width = 16bits
5208 * <7..0> ? status Bits (write as 0s)
5209 *
5210 * 1111 0010 0000 0000 = 0xf200
5211 */
5212
5213 usc_OutDmaReg( info, RDMR, 0xf200 );
5214
5215
5216 /* Transmit DMA mode Register (TDMR)
5217 *
5218 * <15..14> 11 DMA mode = Linked List Buffer mode
5219 * <13> 1 TCBinA/L = fetch Tx Control Block from List entry
5220 * <12> 1 Clear count of List Entry after fetching
5221 * <11..10> 00 Address mode = Increment
5222 * <9> 1 Terminate Buffer on end of frame
5223 * <8> 0 Bus Width = 16bits
5224 * <7..0> ? status Bits (Read Only so write as 0)
5225 *
5226 * 1111 0010 0000 0000 = 0xf200
5227 */
5228
5229 usc_OutDmaReg( info, TDMR, 0xf200 );
5230
5231
5232 /* DMA Interrupt Control Register (DICR)
5233 *
5234 * <15> 1 DMA Interrupt Enable
5235 * <14> 0 1 = Disable IEO from USC
5236 * <13> 0 1 = Don't provide vector during IntAck
5237 * <12> 1 1 = Include status in Vector
5238 * <10..2> 0 reserved, Must be 0s
5239 * <1> 0 1 = Rx DMA Interrupt Enabled
5240 * <0> 0 1 = Tx DMA Interrupt Enabled
5241 *
5242 * 1001 0000 0000 0000 = 0x9000
5243 */
5244
5245 usc_OutDmaReg( info, DICR, 0x9000 );
5246
5247 usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
5248 usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
5249 usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
5250
5251 /* Channel Control Register (CCR)
5252 *
5253 * <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
5254 * <13> 0 Trigger Tx on SW Command Disabled
5255 * <12> 0 Flag Preamble Disabled
5256 * <11..10> 00 Preamble Length
5257 * <9..8> 00 Preamble Pattern
5258 * <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
5259 * <5> 0 Trigger Rx on SW Command Disabled
5260 * <4..0> 0 reserved
5261 *
5262 * 1000 0000 1000 0000 = 0x8080
5263 */
5264
5265 RegValue = 0x8080;
5266
5267 switch ( info->params.preamble_length ) {
5268 case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
5269 case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
5270 case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
5271 }
5272
5273 switch ( info->params.preamble ) {
5274 case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
5275 case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
5276 case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
5277 case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
5278 }
5279
5280 usc_OutReg( info, CCR, RegValue );
5281
5282
5283 /*
5284 * Burst/Dwell Control Register
5285 *
5286 * <15..8> 0x20 Maximum number of transfers per bus grant
5287 * <7..0> 0x00 Maximum number of clock cycles per bus grant
5288 */
5289
5290 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5291 /* don't limit bus occupancy on PCI adapter */
5292 usc_OutDmaReg( info, BDCR, 0x0000 );
5293 }
5294 else
5295 usc_OutDmaReg( info, BDCR, 0x2000 );
5296
5297 usc_stop_transmitter(info);
5298 usc_stop_receiver(info);
5299
5300} /* end of usc_set_sdlc_mode() */
5301
5302/* usc_enable_loopback()
5303 *
5304 * Set the 16C32 for internal loopback mode.
5305 * The TxCLK and RxCLK signals are generated from the BRG0 and
5306 * the TxD is looped back to the RxD internally.
5307 *
5308 * Arguments: info pointer to device instance data
5309 * enable 1 = enable loopback, 0 = disable
5310 * Return Value: None
5311 */
5312static void usc_enable_loopback(struct mgsl_struct *info, int enable)
5313{
5314 if (enable) {
5315 /* blank external TXD output */
5316 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
5317
5318 /* Clock mode Control Register (CMCR)
5319 *
5320 * <15..14> 00 counter 1 Disabled
5321 * <13..12> 00 counter 0 Disabled
5322 * <11..10> 11 BRG1 Input is TxC Pin
5323 * <9..8> 11 BRG0 Input is TxC Pin
5324 * <7..6> 01 DPLL Input is BRG1 Output
5325 * <5..3> 100 TxCLK comes from BRG0
5326 * <2..0> 100 RxCLK comes from BRG0
5327 *
5328 * 0000 1111 0110 0100 = 0x0f64
5329 */
5330
5331 usc_OutReg( info, CMCR, 0x0f64 );
5332
5333 /* Write 16-bit Time Constant for BRG0 */
5334 /* use clock speed if available, otherwise use 8 for diagnostics */
5335 if (info->params.clock_speed) {
5336 if (info->bus_type == MGSL_BUS_TYPE_PCI)
5337 usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
5338 else
5339 usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
5340 } else
5341 usc_OutReg(info, TC0R, (u16)8);
5342
5343 	    /* Hardware Configuration Register (HCR): clear Bit 1 so BRG0
5344 	       mode = Continuous, and set Bit 0 to enable BRG0. */
5345 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5346
5347 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5348 usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
5349
5350 /* set Internal Data loopback mode */
5351 info->loopback_bits = 0x300;
5352 outw( 0x0300, info->io_base + CCAR );
5353 } else {
5354 /* enable external TXD output */
5355 usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
5356
5357 /* clear Internal Data loopback mode */
5358 info->loopback_bits = 0;
5359 outw( 0,info->io_base + CCAR );
5360 }
5361
5362} /* end of usc_enable_loopback() */
5363
5364/* usc_enable_aux_clock()
5365 *
5366  * Enable the AUX clock output at the specified frequency.
5367 *
5368 * Arguments:
5369 *
5370 * info pointer to device extension
5371 * data_rate data rate of clock in bits per second
5372 * A data rate of 0 disables the AUX clock.
5373 *
5374 * Return Value: None
5375 */
5376static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
5377{
5378 u32 XtalSpeed;
5379 u16 Tc;
5380
5381 if ( data_rate ) {
5382 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
5383 XtalSpeed = 11059200;
5384 else
5385 XtalSpeed = 14745600;
5386
5387
5388 /* Tc = (Xtal/Speed) - 1 */
5389 /* If twice the remainder of (Xtal/Speed) is greater than Speed */
5390 /* then rounding up gives a more precise time constant. Instead */
5391 /* of rounding up and then subtracting 1 we just don't subtract */
5392 /* the one in this case. */
5393
5394
5395 Tc = (u16)(XtalSpeed/data_rate);
5396 if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
5397 Tc--;
5398
5399 /* Write 16-bit Time Constant for BRG0 */
5400 usc_OutReg( info, TC0R, Tc );
5401
5402 /*
5403 * Hardware Configuration Register (HCR)
5404 * Clear Bit 1, BRG0 mode = Continuous
5405 * Set Bit 0 to enable BRG0.
5406 */
5407
5408 usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
5409
5410 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
5411 usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
5412 } else {
5413 /* data rate == 0 so turn off BRG0 */
5414 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
5415 }
5416
5417} /* end of usc_enable_aux_clock() */
5418
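/*
 * Illustrative sketch only (not part of the driver): the BRG0 time
 * constant rounding rule used above, pulled out into a hypothetical
 * standalone helper so the arithmetic is easier to follow. The crystal
 * frequencies are the ones quoted in this file.
 *
 * Example: with the 14745600Hz ISA crystal and a 38400bps clock,
 * 14745600/38400 = 384 with remainder 0, so Tc = 383. With a 56000bps
 * clock, 14745600/56000 = 263 remainder 17600; twice the remainder
 * (35200) is still less than 56000, so one is subtracted and Tc = 262.
 */
static u16 example_brg0_time_constant( u32 xtal_speed, u32 data_rate )
{
	u16 Tc = (u16)(xtal_speed / data_rate);

	/* subtract one only when rounding down is the closer choice, */
	/* i.e. when twice the remainder is less than the data rate */
	if ( !(((xtal_speed % data_rate) * 2) / data_rate) )
		Tc--;

	return Tc;
}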
5419/*
5420 *
5421 * usc_process_rxoverrun_sync()
5422 *
5423 * This function processes a receive overrun by resetting the
5424 * receive DMA buffers and issuing a Purge Rx FIFO command
5425 * to allow the receiver to continue receiving.
5426 *
5427 * Arguments:
5428 *
5429 * info pointer to device extension
5430 *
5431 * Return Value: None
5432 */
5433static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
5434{
5435 int start_index;
5436 int end_index;
5437 int frame_start_index;
5438 int start_of_frame_found = FALSE;
5439 int end_of_frame_found = FALSE;
5440 int reprogram_dma = FALSE;
5441
5442 DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
5443 u32 phys_addr;
5444
5445 usc_DmaCmd( info, DmaCmd_PauseRxChannel );
5446 usc_RCmd( info, RCmd_EnterHuntmode );
5447 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5448
5449 /* CurrentRxBuffer points to the 1st buffer of the next */
5450 /* possibly available receive frame. */
5451
5452 frame_start_index = start_index = end_index = info->current_rx_buffer;
5453
5454 /* Search for an unfinished string of buffers. This means */
5455 /* that a receive frame started (at least one buffer with */
5456 	/* count set to zero) but there is no terminating buffer */
5457 /* (status set to non-zero). */
5458
5459 while( !buffer_list[end_index].count )
5460 {
5461 /* Count field has been reset to zero by 16C32. */
5462 /* This buffer is currently in use. */
5463
5464 if ( !start_of_frame_found )
5465 {
5466 start_of_frame_found = TRUE;
5467 frame_start_index = end_index;
5468 end_of_frame_found = FALSE;
5469 }
5470
5471 if ( buffer_list[end_index].status )
5472 {
5473 /* Status field has been set by 16C32. */
5474 /* This is the last buffer of a received frame. */
5475
5476 /* We want to leave the buffers for this frame intact. */
5477 /* Move on to next possible frame. */
5478
5479 start_of_frame_found = FALSE;
5480 end_of_frame_found = TRUE;
5481 }
5482
5483 /* advance to next buffer entry in linked list */
5484 end_index++;
5485 if ( end_index == info->rx_buffer_count )
5486 end_index = 0;
5487
5488 if ( start_index == end_index )
5489 {
5490 /* The entire list has been searched with all Counts == 0 and */
5491 /* all Status == 0. The receive buffers are */
5492 /* completely screwed, reset all receive buffers! */
5493 mgsl_reset_rx_dma_buffers( info );
5494 frame_start_index = 0;
5495 start_of_frame_found = FALSE;
5496 reprogram_dma = TRUE;
5497 break;
5498 }
5499 }
5500
5501 if ( start_of_frame_found && !end_of_frame_found )
5502 {
5503 /* There is an unfinished string of receive DMA buffers */
5504 /* as a result of the receiver overrun. */
5505
5506 /* Reset the buffers for the unfinished frame */
5507 /* and reprogram the receive DMA controller to start */
5508 /* at the 1st buffer of unfinished frame. */
5509
5510 start_index = frame_start_index;
5511
5512 do
5513 {
5514 *((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
5515
5516 /* Adjust index for wrap around. */
5517 if ( start_index == info->rx_buffer_count )
5518 start_index = 0;
5519
5520 } while( start_index != end_index );
5521
5522 reprogram_dma = TRUE;
5523 }
5524
5525 if ( reprogram_dma )
5526 {
5527 usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
5528 usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5529 usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
5530
5531 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5532
5533 /* This empties the receive FIFO and loads the RCC with RCLR */
5534 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5535
5536 /* program 16C32 with physical address of 1st DMA buffer entry */
5537 phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
5538 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5539 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5540
5541 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5542 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5543 usc_EnableInterrupts( info, RECEIVE_STATUS );
5544
5545 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5546 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5547
5548 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5549 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5550 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5551 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5552 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5553 else
5554 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5555 }
5556 else
5557 {
5558 /* This empties the receive FIFO and loads the RCC with RCLR */
5559 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5560 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5561 }
5562
5563} /* end of usc_process_rxoverrun_sync() */
5564
5565/* usc_stop_receiver()
5566 *
5567 * Disable USC receiver
5568 *
5569 * Arguments: info pointer to device instance data
5570 * Return Value: None
5571 */
5572static void usc_stop_receiver( struct mgsl_struct *info )
5573{
5574 if (debug_level >= DEBUG_LEVEL_ISR)
5575 printk("%s(%d):usc_stop_receiver(%s)\n",
5576 __FILE__,__LINE__, info->device_name );
5577
5578 /* Disable receive DMA channel. */
5579 /* This also disables receive DMA channel interrupts */
5580 usc_DmaCmd( info, DmaCmd_ResetRxChannel );
5581
5582 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5583 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5584 usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
5585
5586 usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
5587
5588 /* This empties the receive FIFO and loads the RCC with RCLR */
5589 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5590 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5591
5592 info->rx_enabled = 0;
5593 info->rx_overflow = 0;
5594 info->rx_rcc_underrun = 0;
5595
5596}	/* end of usc_stop_receiver() */
5597
5598/* usc_start_receiver()
5599 *
5600 * Enable the USC receiver
5601 *
5602 * Arguments: info pointer to device instance data
5603 * Return Value: None
5604 */
5605static void usc_start_receiver( struct mgsl_struct *info )
5606{
5607 u32 phys_addr;
5608
5609 if (debug_level >= DEBUG_LEVEL_ISR)
5610 printk("%s(%d):usc_start_receiver(%s)\n",
5611 __FILE__,__LINE__, info->device_name );
5612
5613 mgsl_reset_rx_dma_buffers( info );
5614 usc_stop_receiver( info );
5615
5616 usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
5617 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5618
5619 if ( info->params.mode == MGSL_MODE_HDLC ||
5620 info->params.mode == MGSL_MODE_RAW ) {
5621 /* DMA mode Transfers */
5622 /* Program the DMA controller. */
5623 /* Enable the DMA controller end of buffer interrupt. */
5624
5625 /* program 16C32 with physical address of 1st DMA buffer entry */
5626 phys_addr = info->rx_buffer_list[0].phys_entry;
5627 usc_OutDmaReg( info, NRARL, (u16)phys_addr );
5628 usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
5629
5630 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
5631 usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
5632 usc_EnableInterrupts( info, RECEIVE_STATUS );
5633
5634 /* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
5635 /* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
5636
5637 usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
5638 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
5639 usc_DmaCmd( info, DmaCmd_InitRxChannel );
5640 if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
5641 usc_EnableReceiver(info,ENABLE_AUTO_DCD);
5642 else
5643 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5644 } else {
5645 usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
5646 usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
5647 usc_EnableInterrupts(info, RECEIVE_DATA);
5648
5649 usc_RTCmd( info, RTCmd_PurgeRxFifo );
5650 usc_RCmd( info, RCmd_EnterHuntmode );
5651
5652 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
5653 }
5654
5655 usc_OutReg( info, CCSR, 0x1020 );
5656
5657 info->rx_enabled = 1;
5658
5659} /* end of usc_start_receiver() */
5660
5661/* usc_start_transmitter()
5662 *
5663 * Enable the USC transmitter and send a transmit frame if
5664 * one is loaded in the DMA buffers.
5665 *
5666 * Arguments: info pointer to device instance data
5667 * Return Value: None
5668 */
5669static void usc_start_transmitter( struct mgsl_struct *info )
5670{
5671 u32 phys_addr;
5672 unsigned int FrameSize;
5673
5674 if (debug_level >= DEBUG_LEVEL_ISR)
5675 printk("%s(%d):usc_start_transmitter(%s)\n",
5676 __FILE__,__LINE__, info->device_name );
5677
5678 if ( info->xmit_cnt ) {
5679
5680 /* If auto RTS enabled and RTS is inactive, then assert */
5681 /* RTS and set a flag indicating that the driver should */
5682 /* negate RTS when the transmission completes. */
5683
5684 info->drop_rts_on_tx_done = 0;
5685
5686 if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
5687 usc_get_serial_signals( info );
5688 if ( !(info->serial_signals & SerialSignal_RTS) ) {
5689 info->serial_signals |= SerialSignal_RTS;
5690 usc_set_serial_signals( info );
5691 info->drop_rts_on_tx_done = 1;
5692 }
5693 }
5694
5695
5696 if ( info->params.mode == MGSL_MODE_ASYNC ) {
5697 if ( !info->tx_active ) {
5698 usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
5699 usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
5700 usc_EnableInterrupts(info, TRANSMIT_DATA);
5701 usc_load_txfifo(info);
5702 }
5703 } else {
5704 /* Disable transmit DMA controller while programming. */
5705 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5706
5707 /* Transmit DMA buffer is loaded, so program USC */
5708 /* to send the frame contained in the buffers. */
5709
5710 FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
5711
5712 /* if operating in Raw sync mode, reset the rcc component
5713 * of the tx dma buffer entry, otherwise, the serial controller
5714 * will send a closing sync char after this count.
5715 */
5716 if ( info->params.mode == MGSL_MODE_RAW )
5717 info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
5718
5719 /* Program the Transmit Character Length Register (TCLR) */
5720 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
5721 usc_OutReg( info, TCLR, (u16)FrameSize );
5722
5723 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5724
5725 /* Program the address of the 1st DMA Buffer Entry in linked list */
5726 phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
5727 usc_OutDmaReg( info, NTARL, (u16)phys_addr );
5728 usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
5729
5730 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5731 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
5732 usc_EnableInterrupts( info, TRANSMIT_STATUS );
5733
5734 if ( info->params.mode == MGSL_MODE_RAW &&
5735 info->num_tx_dma_buffers > 1 ) {
5736 /* When running external sync mode, attempt to 'stream' transmit */
5737 /* by filling tx dma buffers as they become available. To do this */
5738 /* we need to enable Tx DMA EOB Status interrupts : */
5739 /* */
5740 /* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
5741 /* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
5742
5743 usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
5744 usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
5745 }
5746
5747 /* Initialize Transmit DMA Channel */
5748 usc_DmaCmd( info, DmaCmd_InitTxChannel );
5749
5750 usc_TCmd( info, TCmd_SendFrame );
5751
5752 info->tx_timer.expires = jiffies + msecs_to_jiffies(5000);
5753 add_timer(&info->tx_timer);
5754 }
5755 info->tx_active = 1;
5756 }
5757
5758 if ( !info->tx_enabled ) {
5759 info->tx_enabled = 1;
5760 if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
5761 usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
5762 else
5763 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
5764 }
5765
5766} /* end of usc_start_transmitter() */
5767
5768/* usc_stop_transmitter()
5769 *
5770 * Stops the transmitter and DMA
5771 *
5772  * Arguments:		info	pointer to device instance data
5773 * Return Value: None
5774 */
5775static void usc_stop_transmitter( struct mgsl_struct *info )
5776{
5777 if (debug_level >= DEBUG_LEVEL_ISR)
5778 printk("%s(%d):usc_stop_transmitter(%s)\n",
5779 __FILE__,__LINE__, info->device_name );
5780
5781 del_timer(&info->tx_timer);
5782
5783 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
5784 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5785 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
5786
5787 usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
5788 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
5789 usc_RTCmd( info, RTCmd_PurgeTxFifo );
5790
5791 info->tx_enabled = 0;
5792 info->tx_active = 0;
5793
5794} /* end of usc_stop_transmitter() */
5795
5796/* usc_load_txfifo()
5797 *
5798 * Fill the transmit FIFO until the FIFO is full or
5799 * there is no more data to load.
5800 *
5801 * Arguments: info pointer to device extension (instance data)
5802 * Return Value: None
5803 */
5804static void usc_load_txfifo( struct mgsl_struct *info )
5805{
5806 int Fifocount;
5807 u8 TwoBytes[2];
5808
5809 if ( !info->xmit_cnt && !info->x_char )
5810 return;
5811
5812 /* Select transmit FIFO status readback in TICR */
5813 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
5814
5815 /* load the Transmit FIFO until FIFOs full or all data sent */
5816
5817 while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
5818 /* there is more space in the transmit FIFO and */
5819 /* there is more data in transmit buffer */
5820
5821 if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
5822 /* write a 16-bit word from transmit buffer to 16C32 */
5823
5824 TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
5825 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5826 TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
5827 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5828
5829 outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
5830
5831 info->xmit_cnt -= 2;
5832 info->icount.tx += 2;
5833 } else {
5834 /* only 1 byte left to transmit or 1 FIFO slot left */
5835
5836 outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
5837 info->io_base + CCAR );
5838
5839 if (info->x_char) {
5840 /* transmit pending high priority char */
5841 outw( info->x_char,info->io_base + CCAR );
5842 info->x_char = 0;
5843 } else {
5844 outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
5845 info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
5846 info->xmit_cnt--;
5847 }
5848 info->icount.tx++;
5849 }
5850 }
5851
5852} /* end of usc_load_txfifo() */
5853
5854/* usc_reset()
5855 *
5856 * Reset the adapter to a known state and prepare it for further use.
5857 *
5858 * Arguments: info pointer to device instance data
5859 * Return Value: None
5860 */
5861static void usc_reset( struct mgsl_struct *info )
5862{
5863 if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
5864 int i;
5865 u32 readval;
5866
5867 /* Set BIT30 of Misc Control Register */
5868 /* (Local Control Register 0x50) to force reset of USC. */
5869
5870 volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
5871 u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
5872
5873 info->misc_ctrl_value |= BIT30;
5874 *MiscCtrl = info->misc_ctrl_value;
5875
5876 /*
5877 * Force at least 170ns delay before clearing
5878 * reset bit. Each read from LCR takes at least
5879 		 * 30ns, so read 10 times for 300ns to be safe.
5880 */
5881 for(i=0;i<10;i++)
5882 readval = *MiscCtrl;
5883
5884 info->misc_ctrl_value &= ~BIT30;
5885 *MiscCtrl = info->misc_ctrl_value;
5886
5887 *LCR0BRDR = BUS_DESCRIPTOR(
5888 1, // Write Strobe Hold (0-3)
5889 2, // Write Strobe Delay (0-3)
5890 2, // Read Strobe Delay (0-3)
5891 0, // NWDD (Write data-data) (0-3)
5892 4, // NWAD (Write Addr-data) (0-31)
5893 0, // NXDA (Read/Write Data-Addr) (0-3)
5894 0, // NRDD (Read Data-Data) (0-3)
5895 5 // NRAD (Read Addr-Data) (0-31)
5896 );
5897 } else {
5898 /* do HW reset */
5899 outb( 0,info->io_base + 8 );
5900 }
5901
5902 info->mbre_bit = 0;
5903 info->loopback_bits = 0;
5904 info->usc_idle_mode = 0;
5905
5906 /*
5907 * Program the Bus Configuration Register (BCR)
5908 *
5909 * <15> 0 Don't use separate address
5910 * <14..6> 0 reserved
5911 * <5..4> 00 IAckmode = Default, don't care
5912 * <3> 1 Bus Request Totem Pole output
5913 * <2> 1 Use 16 Bit data bus
5914 * <1> 0 IRQ Totem Pole output
5915 * <0> 0 Don't Shift Right Addr
5916 *
5917 * 0000 0000 0000 1100 = 0x000c
5918 *
5919 * By writing to io_base + SDPIN the Wait/Ack pin is
5920 * programmed to work as a Wait pin.
5921 */
5922
5923 outw( 0x000c,info->io_base + SDPIN );
5924
5925
5926 outw( 0,info->io_base );
5927 outw( 0,info->io_base + CCAR );
5928
5929 /* select little endian byte ordering */
5930 usc_RTCmd( info, RTCmd_SelectLittleEndian );
5931
5932
5933 /* Port Control Register (PCR)
5934 *
5935 * <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
5936 * <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
5937 * <11..10> 00 Port 5 is Input (No Connect, Don't Care)
5938 * <9..8> 00 Port 4 is Input (No Connect, Don't Care)
5939 * <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
5940 * <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
5941 * <3..2> 01 Port 1 is Input (Dedicated RxC)
5942 * <1..0> 01 Port 0 is Input (Dedicated TxC)
5943 *
5944 * 1111 0000 1111 0101 = 0xf0f5
5945 */
5946
5947 usc_OutReg( info, PCR, 0xf0f5 );
5948
5949
5950 /*
5951 * Input/Output Control Register
5952 *
5953 * <15..14> 00 CTS is active low input
5954 * <13..12> 00 DCD is active low input
5955 * <11..10> 00 TxREQ pin is input (DSR)
5956 * <9..8> 00 RxREQ pin is input (RI)
5957 * <7..6> 00 TxD is output (Transmit Data)
5958 * <5..3> 000 TxC Pin in Input (14.7456MHz Clock)
5959 * <2..0> 100 RxC is Output (drive with BRG0)
5960 *
5961 * 0000 0000 0000 0100 = 0x0004
5962 */
5963
5964 usc_OutReg( info, IOCR, 0x0004 );
5965
5966} /* end of usc_reset() */
5967
5968/* usc_set_async_mode()
5969 *
5970 * Program adapter for asynchronous communications.
5971 *
5972 * Arguments: info pointer to device instance data
5973 * Return Value: None
5974 */
5975static void usc_set_async_mode( struct mgsl_struct *info )
5976{
5977 u16 RegValue;
5978
5979 /* disable interrupts while programming USC */
5980 usc_DisableMasterIrqBit( info );
5981
5982 outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
5983 usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
5984
5985 usc_loopback_frame( info );
5986
5987 /* Channel mode Register (CMR)
5988 *
5989 * <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
5990 * <13..12> 00 00 = 16X Clock
5991 * <11..8> 0000 Transmitter mode = Asynchronous
5992 * <7..6> 00 reserved?
5993 * <5..4> 00 Rx Sub modes, 00 = 16X Clock
5994 * <3..0> 0000 Receiver mode = Asynchronous
5995 *
5996 * 0000 0000 0000 0000 = 0x0
5997 */
5998
5999 RegValue = 0;
6000 if ( info->params.stop_bits != 1 )
6001 RegValue |= BIT14;
6002 usc_OutReg( info, CMR, RegValue );
6003
6004
6005 /* Receiver mode Register (RMR)
6006 *
6007 * <15..13> 000 encoding = None
6008 * <12..08> 00000 reserved (Sync Only)
6009 * <7..6> 00 Even parity
6010 * <5> 0 parity disabled
6011 * <4..2> 000 Receive Char Length = 8 bits
6012 * <1..0> 00 Disable Receiver
6013 *
6014 * 0000 0000 0000 0000 = 0x0
6015 */
6016
6017 RegValue = 0;
6018
6019 if ( info->params.data_bits != 8 )
6020 RegValue |= BIT4+BIT3+BIT2;
6021
6022 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6023 RegValue |= BIT5;
6024 if ( info->params.parity != ASYNC_PARITY_ODD )
6025 RegValue |= BIT6;
6026 }
6027
6028 usc_OutReg( info, RMR, RegValue );
6029
6030
6031 /* Set IRQ trigger level */
6032
6033 usc_RCmd( info, RCmd_SelectRicrIntLevel );
6034
6035
6036 /* Receive Interrupt Control Register (RICR)
6037 *
6038 * <15..8> ? RxFIFO IRQ Request Level
6039 *
6040 * Note: For async mode the receive FIFO level must be set
6041 	 * to 0 to avoid the situation where the FIFO contains fewer bytes
6042 * than the trigger level and no more data is expected.
6043 *
6044 * <7> 0 Exited Hunt IA (Interrupt Arm)
6045 * <6> 0 Idle Received IA
6046 * <5> 0 Break/Abort IA
6047 * <4> 0 Rx Bound IA
6048 * <3> 0 Queued status reflects oldest byte in FIFO
6049 * <2> 0 Abort/PE IA
6050 * <1> 0 Rx Overrun IA
6051 * <0> 0 Select TC0 value for readback
6052 *
6053 	 *	0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
6054 */
6055
6056 usc_OutReg( info, RICR, 0x0000 );
6057
6058 usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
6059 usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
6060
6061
6062 /* Transmit mode Register (TMR)
6063 *
6064 * <15..13> 000 encoding = None
6065 * <12..08> 00000 reserved (Sync Only)
6066 * <7..6> 00 Transmit parity Even
6067 * <5> 0 Transmit parity Disabled
6068 * <4..2> 000 Tx Char Length = 8 bits
6069 * <1..0> 00 Disable Transmitter
6070 *
6071 * 0000 0000 0000 0000 = 0x0
6072 */
6073
6074 RegValue = 0;
6075
6076 if ( info->params.data_bits != 8 )
6077 RegValue |= BIT4+BIT3+BIT2;
6078
6079 if ( info->params.parity != ASYNC_PARITY_NONE ) {
6080 RegValue |= BIT5;
6081 if ( info->params.parity != ASYNC_PARITY_ODD )
6082 RegValue |= BIT6;
6083 }
6084
6085 usc_OutReg( info, TMR, RegValue );
6086
6087 usc_set_txidle( info );
6088
6089
6090 /* Set IRQ trigger level */
6091
6092 usc_TCmd( info, TCmd_SelectTicrIntLevel );
6093
6094
6095 /* Transmit Interrupt Control Register (TICR)
6096 *
6097 * <15..8> ? Transmit FIFO IRQ Level
6098 * <7> 0 Present IA (Interrupt Arm)
6099 * <6> 1 Idle Sent IA
6100 * <5> 0 Abort Sent IA
6101 * <4> 0 EOF/EOM Sent IA
6102 * <3> 0 CRC Sent IA
6103 * <2> 0 1 = Wait for SW Trigger to Start Frame
6104 * <1> 0 Tx Underrun IA
6105 * <0> 0 TC0 constant on read back
6106 *
6107 * 0000 0000 0100 0000 = 0x0040
6108 */
6109
6110 usc_OutReg( info, TICR, 0x1f40 );
6111
6112 usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
6113 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
6114
6115 usc_enable_async_clock( info, info->params.data_rate );
6116
6117
6118 /* Channel Control/status Register (CCSR)
6119 *
6120 * <15> X RCC FIFO Overflow status (RO)
6121 * <14> X RCC FIFO Not Empty status (RO)
6122 * <13> 0 1 = Clear RCC FIFO (WO)
6123 * <12> X DPLL in Sync status (RO)
6124 * <11> X DPLL 2 Missed Clocks status (RO)
6125 * <10> X DPLL 1 Missed Clock status (RO)
6126 * <9..8> 00 DPLL Resync on rising and falling edges (RW)
6127 * <7> X SDLC Loop On status (RO)
6128 * <6> X SDLC Loop Send status (RO)
6129 * <5> 1 Bypass counters for TxClk and RxClk (RW)
6130 * <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
6131 * <1..0> 00 reserved
6132 *
6133 * 0000 0000 0010 0000 = 0x0020
6134 */
6135
6136 usc_OutReg( info, CCSR, 0x0020 );
6137
6138 usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6139 RECEIVE_DATA + RECEIVE_STATUS );
6140
6141 usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
6142 RECEIVE_DATA + RECEIVE_STATUS );
6143
6144 usc_EnableMasterIrqBit( info );
6145
6146 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6147 /* Enable INTEN (Port 6, Bit12) */
6148 /* This connects the IRQ request signal to the ISA bus */
6149 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6150 }
6151
6152} /* end of usc_set_async_mode() */
6153
6154/* usc_loopback_frame()
6155 *
6156 * Loop back a small (2 byte) dummy SDLC frame.
6157 * Interrupts and DMA are NOT used. The purpose of this is to
6158 * clear any 'stale' status info left over from running in async mode.
6159 *
6160 * The 16C32 shows the strange behaviour of marking the 1st
6161 * received SDLC frame with a CRC error even when there is no
6162  * CRC error. To get around this, a small dummy frame of 2 bytes
6163 * is looped back when switching from async to sync mode.
6164 *
6165 * Arguments: info pointer to device instance data
6166 * Return Value: None
6167 */
6168static void usc_loopback_frame( struct mgsl_struct *info )
6169{
6170 int i;
6171 unsigned long oldmode = info->params.mode;
6172
6173 info->params.mode = MGSL_MODE_HDLC;
6174
6175 usc_DisableMasterIrqBit( info );
6176
6177 usc_set_sdlc_mode( info );
6178 usc_enable_loopback( info, 1 );
6179
6180 /* Write 16-bit Time Constant for BRG0 */
6181 usc_OutReg( info, TC0R, 0 );
6182
6183 /* Channel Control Register (CCR)
6184 *
6185 * <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
6186 * <13> 0 Trigger Tx on SW Command Disabled
6187 * <12> 0 Flag Preamble Disabled
6188 * <11..10> 00 Preamble Length = 8-Bits
6189 * <9..8> 01 Preamble Pattern = flags
6190 * <7..6> 10 Don't use 32-bit Rx status Blocks (RSBs)
6191 * <5> 0 Trigger Rx on SW Command Disabled
6192 * <4..0> 0 reserved
6193 *
6194 * 0000 0001 0000 0000 = 0x0100
6195 */
6196
6197 usc_OutReg( info, CCR, 0x0100 );
6198
6199 /* SETUP RECEIVER */
6200 usc_RTCmd( info, RTCmd_PurgeRxFifo );
6201 usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
6202
6203 /* SETUP TRANSMITTER */
6204 /* Program the Transmit Character Length Register (TCLR) */
6205 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
6206 usc_OutReg( info, TCLR, 2 );
6207 usc_RTCmd( info, RTCmd_PurgeTxFifo );
6208
6209 /* unlatch Tx status bits, and start transmit channel. */
6210 usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
6211 outw(0,info->io_base + DATAREG);
6212
6213 /* ENABLE TRANSMITTER */
6214 usc_TCmd( info, TCmd_SendFrame );
6215 usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
6216
6217 /* WAIT FOR RECEIVE COMPLETE */
6218 for (i=0 ; i<1000 ; i++)
6219 if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
6220 break;
6221
6222 /* clear Internal Data loopback mode */
6223 usc_enable_loopback(info, 0);
6224
6225 usc_EnableMasterIrqBit(info);
6226
6227 info->params.mode = oldmode;
6228
6229} /* end of usc_loopback_frame() */
6230
6231/* usc_set_sync_mode() Programs the USC for SDLC communications.
6232 *
6233 * Arguments: info pointer to adapter info structure
6234 * Return Value: None
6235 */
6236static void usc_set_sync_mode( struct mgsl_struct *info )
6237{
6238 usc_loopback_frame( info );
6239 usc_set_sdlc_mode( info );
6240
6241 if (info->bus_type == MGSL_BUS_TYPE_ISA) {
6242 /* Enable INTEN (Port 6, Bit12) */
6243 /* This connects the IRQ request signal to the ISA bus */
6244 usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
6245 }
6246
6247 usc_enable_aux_clock(info, info->params.clock_speed);
6248
6249 if (info->params.loopback)
6250 usc_enable_loopback(info,1);
6251
6252}	/* end of usc_set_sync_mode() */
6253
6254/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
6255 *
6256 * Arguments: info pointer to device instance data
6257 * Return Value: None
6258 */
6259static void usc_set_txidle( struct mgsl_struct *info )
6260{
6261 u16 usc_idle_mode = IDLEMODE_FLAGS;
6262
6263 /* Map API idle mode to USC register bits */
6264
6265 switch( info->idle_mode ){
6266 case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
6267 case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
6268 case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
6269 case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
6270 case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
6271 case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
6272 case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
6273 }
6274
6275 info->usc_idle_mode = usc_idle_mode;
6276 //usc_OutReg(info, TCSR, usc_idle_mode);
6277 info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
6278 info->tcsr_value += usc_idle_mode;
6279 usc_OutReg(info, TCSR, info->tcsr_value);
6280
6281 /*
6282 * if SyncLink WAN adapter is running in external sync mode, the
6283 * transmitter has been set to Monosync in order to try to mimic
6284 * a true raw outbound bit stream. Monosync still sends an open/close
6285 * sync char at the start/end of a frame. Try to match those sync
6286 	 * patterns to the idle mode set here.
6287 */
6288 if ( info->params.mode == MGSL_MODE_RAW ) {
6289 unsigned char syncpat = 0;
6290 switch( info->idle_mode ) {
6291 case HDLC_TXIDLE_FLAGS:
6292 syncpat = 0x7e;
6293 break;
6294 case HDLC_TXIDLE_ALT_ZEROS_ONES:
6295 syncpat = 0x55;
6296 break;
6297 case HDLC_TXIDLE_ZEROS:
6298 case HDLC_TXIDLE_SPACE:
6299 syncpat = 0x00;
6300 break;
6301 case HDLC_TXIDLE_ONES:
6302 case HDLC_TXIDLE_MARK:
6303 syncpat = 0xff;
6304 break;
6305 case HDLC_TXIDLE_ALT_MARK_SPACE:
6306 syncpat = 0xaa;
6307 break;
6308 }
6309
6310 usc_SetTransmitSyncChars(info,syncpat,syncpat);
6311 }
6312
6313} /* end of usc_set_txidle() */
6314
6315/* usc_get_serial_signals()
6316 *
6317 * Query the adapter for the state of the V24 status (input) signals.
6318 *
6319 * Arguments: info pointer to device instance data
6320 * Return Value: None
6321 */
6322static void usc_get_serial_signals( struct mgsl_struct *info )
6323{
6324 u16 status;
6325
6326 /* clear all serial signals except DTR and RTS */
6327 info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;
6328
6329 /* Read the Misc Interrupt status Register (MISR) to get */
6330 /* the V24 status signals. */
6331
6332 status = usc_InReg( info, MISR );
6333
6334 /* set serial signal bits to reflect MISR */
6335
6336 if ( status & MISCSTATUS_CTS )
6337 info->serial_signals |= SerialSignal_CTS;
6338
6339 if ( status & MISCSTATUS_DCD )
6340 info->serial_signals |= SerialSignal_DCD;
6341
6342 if ( status & MISCSTATUS_RI )
6343 info->serial_signals |= SerialSignal_RI;
6344
6345 if ( status & MISCSTATUS_DSR )
6346 info->serial_signals |= SerialSignal_DSR;
6347
6348} /* end of usc_get_serial_signals() */
6349
6350/* usc_set_serial_signals()
6351 *
6352 * Set the state of DTR and RTS based on contents of
6353 * serial_signals member of device extension.
6354 *
6355 * Arguments: info pointer to device instance data
6356 * Return Value: None
6357 */
6358static void usc_set_serial_signals( struct mgsl_struct *info )
6359{
6360 u16 Control;
6361 unsigned char V24Out = info->serial_signals;
6362
6363 /* get the current value of the Port Control Register (PCR) */
6364
6365 Control = usc_InReg( info, PCR );
6366
6367 if ( V24Out & SerialSignal_RTS )
6368 Control &= ~(BIT6);
6369 else
6370 Control |= BIT6;
6371
6372 if ( V24Out & SerialSignal_DTR )
6373 Control &= ~(BIT4);
6374 else
6375 Control |= BIT4;
6376
6377 usc_OutReg( info, PCR, Control );
6378
6379} /* end of usc_set_serial_signals() */
6380
6381/* usc_enable_async_clock()
6382 *
6383 * Enable the async clock at the specified frequency.
6384 *
6385 * Arguments: info pointer to device instance data
6386 * data_rate data rate of clock in bps
6387 * 0 disables the AUX clock.
6388 * Return Value: None
6389 */
6390static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
6391{
6392 if ( data_rate ) {
6393 /*
6394 * Clock mode Control Register (CMCR)
6395 *
6396 * <15..14> 00 counter 1 Disabled
6397 * <13..12> 00 counter 0 Disabled
6398 * <11..10> 11 BRG1 Input is TxC Pin
6399 * <9..8> 11 BRG0 Input is TxC Pin
6400 * <7..6> 01 DPLL Input is BRG1 Output
6401 * <5..3> 100 TxCLK comes from BRG0
6402 * <2..0> 100 RxCLK comes from BRG0
6403 *
6404 * 0000 1111 0110 0100 = 0x0f64
6405 */
6406
6407 usc_OutReg( info, CMCR, 0x0f64 );
6408
6409
6410 /*
6411 * Write 16-bit Time Constant for BRG0
6412 * Time Constant = (ClkSpeed / data_rate) - 1
6413 * ClkSpeed = 921600 (ISA), 691200 (PCI)
6414 */
6415
6416 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
6417 usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
6418 else
6419 usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
6420
6421
6422 /*
6423 * Hardware Configuration Register (HCR)
6424 * Clear Bit 1, BRG0 mode = Continuous
6425 * Set Bit 0 to enable BRG0.
6426 */
6427
6428 usc_OutReg( info, HCR,
6429 (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
6430
6431
6432 /* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
6433
6434 usc_OutReg( info, IOCR,
6435 (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
6436 } else {
6437 /* data rate == 0 so turn off BRG0 */
6438 usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
6439 }
6440
6441} /* end of usc_enable_async_clock() */
6442
6443/*
6444 * Buffer Structures:
6445 *
6446 * Normal memory access uses virtual addresses that can make discontiguous
6447 * physical memory pages appear to be contiguous in the virtual address
6448  * space (the processor's memory mapping handles the conversions).
6449 *
6450 * DMA transfers require physically contiguous memory. This is because
6451 * the DMA system controller and DMA bus masters deal with memory using
6452 * only physical addresses.
6453 *
6454 * This causes a problem under Windows NT when large DMA buffers are
6455 * needed. Fragmentation of the nonpaged pool prevents allocations of
6456 * physically contiguous buffers larger than the PAGE_SIZE.
6457 *
6458 * However the 16C32 supports Bus Master Scatter/Gather DMA which
6459 * allows DMA transfers to physically discontiguous buffers. Information
6460 * about each data transfer buffer is contained in a memory structure
6461 * called a 'buffer entry'. A list of buffer entries is maintained
6462 * to track and control the use of the data transfer buffers.
6463 *
6464 * To support this strategy we will allocate sufficient PAGE_SIZE
6465 * contiguous memory buffers to allow for the total required buffer
6466 * space.
6467 *
6468 * The 16C32 accesses the list of buffer entries using Bus Master
6469 * DMA. Control information is read from the buffer entries by the
6470 * 16C32 to control data transfers. status information is written to
6471 * the buffer entries by the 16C32 to indicate the status of completed
6472 * transfers.
6473 *
6474 * The CPU writes control information to the buffer entries to control
6475 * the 16C32 and reads status information from the buffer entries to
6476 * determine information about received and transmitted frames.
6477 *
6478 * Because the CPU and 16C32 (adapter) both need simultaneous access
6479 * to the buffer entries, the buffer entry memory is allocated with
6480 * HalAllocateCommonBuffer(). This restricts the size of the buffer
6481 * entry list to PAGE_SIZE.
6482 *
6483 * The actual data buffers on the other hand will only be accessed
6484 * by the CPU or the adapter but not by both simultaneously. This allows
6485 * Scatter/Gather packet based DMA procedures for using physically
6486 * discontiguous pages.
6487 */
6488
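/*
 * Illustrative sketch only: the DMA buffer entry fields referenced by
 * the routines below, gathered in one place for readability. The real
 * DMABUFFERENTRY definition appears earlier in this file and also
 * carries the link/padding fields required by the 16C32; the struct
 * name used here is hypothetical.
 */
struct example_dma_buffer_entry {
	volatile u16 count;	/* cleared by the 16C32 when it starts using the buffer */
	volatile u16 status;	/* set by the 16C32 on the last buffer of a frame */
	volatile u16 rcc;	/* residual character count for the received frame */
	u32 phys_entry;		/* physical address of this entry (for NRARL/NRARU) */
	char *virt_addr;	/* virtual address of the data buffer */
};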
6489/*
6490 * mgsl_reset_tx_dma_buffers()
6491 *
6492 * Set the count for all transmit buffers to 0 to indicate the
6493 * buffer is available for use and set the current buffer to the
6494 * first buffer. This effectively makes all buffers free and
6495 * discards any data in buffers.
6496 *
6497 * Arguments: info pointer to device instance data
6498 * Return Value: None
6499 */
6500static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
6501{
6502 unsigned int i;
6503
6504 for ( i = 0; i < info->tx_buffer_count; i++ ) {
6505 *((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
6506 }
6507
6508 info->current_tx_buffer = 0;
6509 info->start_tx_dma_buffer = 0;
6510 info->tx_dma_buffers_used = 0;
6511
6512 info->get_tx_holding_index = 0;
6513 info->put_tx_holding_index = 0;
6514 info->tx_holding_count = 0;
6515
6516} /* end of mgsl_reset_tx_dma_buffers() */
6517
6518/*
6519 * num_free_tx_dma_buffers()
6520 *
6521 * returns the number of free tx dma buffers available
6522 *
6523 * Arguments: info pointer to device instance data
6524 * Return Value: number of free tx dma buffers
6525 */
6526static int num_free_tx_dma_buffers(struct mgsl_struct *info)
6527{
6528 return info->tx_buffer_count - info->tx_dma_buffers_used;
6529}
6530
6531/*
6532 * mgsl_reset_rx_dma_buffers()
6533 *
6534 * Set the count for all receive buffers to DMABUFFERSIZE
6535 * and set the current buffer to the first buffer. This effectively
6536 * makes all buffers free and discards any data in buffers.
6537 *
6538 * Arguments: info pointer to device instance data
6539 * Return Value: None
6540 */
6541static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
6542{
6543 unsigned int i;
6544
6545 for ( i = 0; i < info->rx_buffer_count; i++ ) {
6546 *((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
6547// info->rx_buffer_list[i].count = DMABUFFERSIZE;
6548// info->rx_buffer_list[i].status = 0;
6549 }
6550
6551 info->current_rx_buffer = 0;
6552
6553} /* end of mgsl_reset_rx_dma_buffers() */
6554
6555/*
6556 * mgsl_free_rx_frame_buffers()
6557 *
6558 * Free the receive buffers used by a received SDLC
6559 * frame such that the buffers can be reused.
6560 *
6561 * Arguments:
6562 *
6563 * info pointer to device instance data
6564 * StartIndex index of 1st receive buffer of frame
6565 * EndIndex index of last receive buffer of frame
6566 *
6567 * Return Value: None
6568 */
6569static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
6570{
6571 int Done = 0;
6572 DMABUFFERENTRY *pBufEntry;
6573 unsigned int Index;
6574
6575 /* Starting with 1st buffer entry of the frame clear the status */
6576 /* field and set the count field to DMA Buffer Size. */
6577
6578 Index = StartIndex;
6579
6580 while( !Done ) {
6581 pBufEntry = &(info->rx_buffer_list[Index]);
6582
6583 if ( Index == EndIndex ) {
6584 /* This is the last buffer of the frame! */
6585 Done = 1;
6586 }
6587
6588 /* reset current buffer for reuse */
6589// pBufEntry->status = 0;
6590// pBufEntry->count = DMABUFFERSIZE;
6591 *((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
6592
6593 /* advance to next buffer entry in linked list */
6594 Index++;
6595 if ( Index == info->rx_buffer_count )
6596 Index = 0;
6597 }
6598
6599 /* set current buffer to next buffer after last buffer of frame */
6600 info->current_rx_buffer = Index;
6601
6602}	/* end of mgsl_free_rx_frame_buffers() */
6603
6604/* mgsl_get_rx_frame()
6605 *
6606 * This function attempts to return a received SDLC frame from the
6607 * receive DMA buffers. Only frames received without errors are returned.
6608 *
6609 * Arguments: info pointer to device extension
6610 * Return Value: 1 if frame returned, otherwise 0
6611 */
6612static int mgsl_get_rx_frame(struct mgsl_struct *info)
6613{
6614 unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
6615 unsigned short status;
6616 DMABUFFERENTRY *pBufEntry;
6617 unsigned int framesize = 0;
6618 int ReturnCode = 0;
6619 unsigned long flags;
6620 struct tty_struct *tty = info->tty;
6621 int return_frame = 0;
6622
6623 /*
6624 * current_rx_buffer points to the 1st buffer of the next available
6625 * receive frame. To find the last buffer of the frame look for
6626 * a non-zero status field in the buffer entries. (The status
6627 	 * field is set by the 16C32 after completing a receive frame.)
6628 */
6629
6630 StartIndex = EndIndex = info->current_rx_buffer;
6631
6632 while( !info->rx_buffer_list[EndIndex].status ) {
6633 /*
6634 * If the count field of the buffer entry is non-zero then
6635 * this buffer has not been used. (The 16C32 clears the count
6636 * field when it starts using the buffer.) If an unused buffer
6637 * is encountered then there are no frames available.
6638 */
6639
6640 if ( info->rx_buffer_list[EndIndex].count )
6641 goto Cleanup;
6642
6643 /* advance to next buffer entry in linked list */
6644 EndIndex++;
6645 if ( EndIndex == info->rx_buffer_count )
6646 EndIndex = 0;
6647
6648 /* if entire list searched then no frame available */
6649 if ( EndIndex == StartIndex ) {
6650 /* If this occurs then something bad happened,
6651 * all buffers have been 'used' but none mark
6652 * the end of a frame. Reset buffers and receiver.
6653 */
6654
6655 if ( info->rx_enabled ){
6656 spin_lock_irqsave(&info->irq_spinlock,flags);
6657 usc_start_receiver(info);
6658 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6659 }
6660 goto Cleanup;
6661 }
6662 }
6663
6664
6665 /* check status of receive frame */
6666
6667 status = info->rx_buffer_list[EndIndex].status;
6668
6669 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6670 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6671 if ( status & RXSTATUS_SHORT_FRAME )
6672 info->icount.rxshort++;
6673 else if ( status & RXSTATUS_ABORT )
6674 info->icount.rxabort++;
6675 else if ( status & RXSTATUS_OVERRUN )
6676 info->icount.rxover++;
6677 else {
6678 info->icount.rxcrc++;
6679 if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
6680 return_frame = 1;
6681 }
6682 framesize = 0;
6683#ifdef CONFIG_HDLC
6684 {
6685 struct net_device_stats *stats = hdlc_stats(info->netdev);
6686 stats->rx_errors++;
6687 stats->rx_frame_errors++;
6688 }
6689#endif
6690 } else
6691 return_frame = 1;
6692
6693 if ( return_frame ) {
6694 /* receive frame has no errors, get frame size.
6695 * The frame size is the starting value of the RCC (which was
6696 * set to 0xffff) minus the ending value of the RCC (decremented
6697 * once for each receive character) minus 2 for the 16-bit CRC.
6698 */
6699
6700 framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
6701
6702 /* adjust frame size for CRC if any */
6703 if ( info->params.crc_type == HDLC_CRC_16_CCITT )
6704 framesize -= 2;
6705 else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
6706 framesize -= 4;
6707 }
6708
6709 if ( debug_level >= DEBUG_LEVEL_BH )
6710 printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
6711 __FILE__,__LINE__,info->device_name,status,framesize);
6712
6713 if ( debug_level >= DEBUG_LEVEL_DATA )
6714 mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
6715 min_t(int, framesize, DMABUFFERSIZE),0);
6716
6717 if (framesize) {
6718 if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
6719 ((framesize+1) > info->max_frame_size) ) ||
6720 (framesize > info->max_frame_size) )
6721 info->icount.rxlong++;
6722 else {
6723 /* copy dma buffer(s) to contiguous intermediate buffer */
6724 int copy_count = framesize;
6725 int index = StartIndex;
6726 unsigned char *ptmp = info->intermediate_rxbuffer;
6727
6728 if ( !(status & RXSTATUS_CRC_ERROR))
6729 info->icount.rxok++;
6730
6731 while(copy_count) {
6732 int partial_count;
6733 if ( copy_count > DMABUFFERSIZE )
6734 partial_count = DMABUFFERSIZE;
6735 else
6736 partial_count = copy_count;
6737
6738 pBufEntry = &(info->rx_buffer_list[index]);
6739 memcpy( ptmp, pBufEntry->virt_addr, partial_count );
6740 ptmp += partial_count;
6741 copy_count -= partial_count;
6742
6743 if ( ++index == info->rx_buffer_count )
6744 index = 0;
6745 }
6746
6747 if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
6748 ++framesize;
6749 *ptmp = (status & RXSTATUS_CRC_ERROR ?
6750 RX_CRC_ERROR :
6751 RX_OK);
6752
6753 if ( debug_level >= DEBUG_LEVEL_DATA )
6754 printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
6755 __FILE__,__LINE__,info->device_name,
6756 *ptmp);
6757 }
6758
6759#ifdef CONFIG_HDLC
6760 if (info->netcount)
6761 hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
6762 else
6763#endif
6764 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6765 }
6766 }
6767 /* Free the buffers used by this frame. */
6768 mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
6769
6770 ReturnCode = 1;
6771
6772Cleanup:
6773
6774 if ( info->rx_enabled && info->rx_overflow ) {
6775 		/* The receiver needs to be restarted because of
6776 * a receive overflow (buffer or FIFO). If the
6777 * receive buffers are now empty, then restart receiver.
6778 */
6779
6780 if ( !info->rx_buffer_list[EndIndex].status &&
6781 info->rx_buffer_list[EndIndex].count ) {
6782 spin_lock_irqsave(&info->irq_spinlock,flags);
6783 usc_start_receiver(info);
6784 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6785 }
6786 }
6787
6788 return ReturnCode;
6789
6790} /* end of mgsl_get_rx_frame() */
6791
6792/* mgsl_get_raw_rx_frame()
6793 *
6794 * This function attempts to return a received frame from the
6795 * receive DMA buffers when running in external loop mode. In this mode,
6796 * we will return at most one DMABUFFERSIZE frame to the application.
6797  * The USC receiver triggers off of DCD going active to start a new
6798 * frame, and DCD going inactive to terminate the frame (similar to
6799 * processing a closing flag character).
6800 *
6801 * In this routine, we will return DMABUFFERSIZE "chunks" at a time.
6802 * If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
6803 * status field and the RCC field will indicate the length of the
6804 * entire received frame. We take this RCC field and get the modulus
6805  * of RCC and DMABUFFERSIZE to determine the number of bytes in the
6806 * last Rx DMA buffer and return that last portion of the frame.
6807 *
6808 * Arguments: info pointer to device extension
6809 * Return Value: 1 if frame returned, otherwise 0
6810 */
6811static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
6812{
6813 unsigned int CurrentIndex, NextIndex;
6814 unsigned short status;
6815 DMABUFFERENTRY *pBufEntry;
6816 unsigned int framesize = 0;
6817 int ReturnCode = 0;
6818 unsigned long flags;
6819 struct tty_struct *tty = info->tty;
6820
6821 /*
6822 * current_rx_buffer points to the 1st buffer of the next available
6823 * receive frame. The status field is set by the 16C32 after
6824 * completing a receive frame. If the status field of this buffer
6825 * is zero, either the USC is still filling this buffer or this
6826 * is one of a series of buffers making up a received frame.
6827 *
6828 * If the count field of this buffer is zero, the USC is either
6829 * using this buffer or has used this buffer. Look at the count
6830 * field of the next buffer. If that next buffer's count is
6831 * non-zero, the USC is still actively using the current buffer.
6832 * Otherwise, if the next buffer's count field is zero, the
6833 * current buffer is complete and the USC is using the next
6834 * buffer.
6835 */
6836 CurrentIndex = NextIndex = info->current_rx_buffer;
6837 ++NextIndex;
6838 if ( NextIndex == info->rx_buffer_count )
6839 NextIndex = 0;
6840
6841 if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
6842 (info->rx_buffer_list[CurrentIndex].count == 0 &&
6843 info->rx_buffer_list[NextIndex].count == 0)) {
6844 /*
6845 * Either the status field of this dma buffer is non-zero
6846 * (indicating the last buffer of a receive frame) or the next
6847 * buffer is marked as in use -- implying this buffer is complete
6848 	 * and is an intermediate buffer for this received frame.
6849 */
6850
6851 status = info->rx_buffer_list[CurrentIndex].status;
6852
6853 if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
6854 RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
6855 if ( status & RXSTATUS_SHORT_FRAME )
6856 info->icount.rxshort++;
6857 else if ( status & RXSTATUS_ABORT )
6858 info->icount.rxabort++;
6859 else if ( status & RXSTATUS_OVERRUN )
6860 info->icount.rxover++;
6861 else
6862 info->icount.rxcrc++;
6863 framesize = 0;
6864 } else {
6865 /*
6866 * A receive frame is available, get frame size and status.
6867 *
6868 * The frame size is the starting value of the RCC (which was
6869 * set to 0xffff) minus the ending value of the RCC (decremented
6870 * once for each receive character) minus 2 or 4 for the 16-bit
6871 * or 32-bit CRC.
6872 *
6873 * If the status field is zero, this is an intermediate buffer.
6874 		 * Its size is 4K.
6875 *
6876 * If the DMA Buffer Entry's Status field is non-zero, the
6877 * receive operation completed normally (ie: DCD dropped). The
6878 * RCC field is valid and holds the received frame size.
6879 * It is possible that the RCC field will be zero on a DMA buffer
6880 * entry with a non-zero status. This can occur if the total
6881 * frame size (number of bytes between the time DCD goes active
6882 * to the time DCD goes inactive) exceeds 65535 bytes. In this
6883 * case the 16C32 has underrun on the RCC count and appears to
6884 * stop updating this counter to let us know the actual received
6885 * frame size. If this happens (non-zero status and zero RCC),
6886 * simply return the entire RxDMA Buffer
6887 */
6888 if ( status ) {
6889 /*
6890 * In the event that the final RxDMA Buffer is
6891 * terminated with a non-zero status and the RCC
6892 * field is zero, we interpret this as the RCC
6893 * having underflowed (received frame > 65535 bytes).
6894 *
6895 * Signal the event to the user by passing back
6896 			 * a status of RxStatus_CrcError, returning the full
6897 			 * buffer, and letting the app figure out what data
6898 			 * is actually valid.
6899 */
6900 if ( info->rx_buffer_list[CurrentIndex].rcc )
6901 framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
6902 else
6903 framesize = DMABUFFERSIZE;
6904 }
6905 else
6906 framesize = DMABUFFERSIZE;
6907 }
6908
6909 if ( framesize > DMABUFFERSIZE ) {
6910 /*
6911 		 * if running in raw sync mode, the ISR handler for
6912 		 * End Of Buffer events terminates all buffers at 4K.
6913 		 * If the reported frame size is >4K, get the
6914 * actual number of bytes of the frame in this buffer.
6915 */
6916 framesize = framesize % DMABUFFERSIZE;
6917 }
6918
6919
6920 if ( debug_level >= DEBUG_LEVEL_BH )
6921 printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
6922 __FILE__,__LINE__,info->device_name,status,framesize);
6923
6924 if ( debug_level >= DEBUG_LEVEL_DATA )
6925 mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
6926 min_t(int, framesize, DMABUFFERSIZE),0);
6927
6928 if (framesize) {
6929 /* copy dma buffer(s) to contiguous intermediate buffer */
6930 /* NOTE: we never copy more than DMABUFFERSIZE bytes */
6931
6932 pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
6933 memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
6934 info->icount.rxok++;
6935
6936 ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
6937 }
6938
6939 /* Free the buffers used by this frame. */
6940 mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
6941
6942 ReturnCode = 1;
6943 }
6944
6945
6946 if ( info->rx_enabled && info->rx_overflow ) {
6947 		/* The receiver needs to be restarted because of
6948 * a receive overflow (buffer or FIFO). If the
6949 * receive buffers are now empty, then restart receiver.
6950 */
6951
6952 if ( !info->rx_buffer_list[CurrentIndex].status &&
6953 info->rx_buffer_list[CurrentIndex].count ) {
6954 spin_lock_irqsave(&info->irq_spinlock,flags);
6955 usc_start_receiver(info);
6956 spin_unlock_irqrestore(&info->irq_spinlock,flags);
6957 }
6958 }
6959
6960 return ReturnCode;
6961
6962} /* end of mgsl_get_raw_rx_frame() */
6963
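/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * driver): the size calculation described in mgsl_get_raw_rx_frame()
 * above for external sync (raw) mode. Intermediate buffers come back
 * as full DMABUFFERSIZE chunks; the final buffer returns the remainder
 * of the frame derived from the RCC field, and a zero RCC with a
 * non-zero status (RCC underrun, frame > 65535 bytes) returns the
 * whole buffer.
 */
static unsigned int example_raw_chunk_size( u16 status, u16 rcc )
{
	unsigned int framesize;

	if ( !status )
		return DMABUFFERSIZE;	/* intermediate buffer, full chunk */

	framesize = rcc ? (unsigned int)(RCLRVALUE - rcc) : DMABUFFERSIZE;

	/* the ISR terminates buffers at DMABUFFERSIZE, so only the */
	/* remainder of a larger frame sits in this last buffer */
	if ( framesize > DMABUFFERSIZE )
		framesize = framesize % DMABUFFERSIZE;

	return framesize;
}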
6964/* mgsl_load_tx_dma_buffer()
6965 *
6966 * Load the transmit DMA buffer with the specified data.
6967 *
6968 * Arguments:
6969 *
6970 * info pointer to device extension
6971 * Buffer pointer to buffer containing frame to load
6972 * BufferSize size in bytes of frame in Buffer
6973 *
6974 * Return Value: None
6975 */
6976static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
6977 const char *Buffer, unsigned int BufferSize)
6978{
6979 unsigned short Copycount;
6980 unsigned int i = 0;
6981 DMABUFFERENTRY *pBufEntry;
6982
6983 if ( debug_level >= DEBUG_LEVEL_DATA )
6984 mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
6985
6986 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
6987 /* set CMR:13 to start transmit when
6988 * next GoAhead (abort) is received
6989 */
6990 info->cmr_value |= BIT13;
6991 }
6992
6993 /* begin loading the frame in the next available tx dma
6994 * buffer, remember it's starting location for setting
6995 * up tx dma operation
6996 */
6997 i = info->current_tx_buffer;
6998 info->start_tx_dma_buffer = i;
6999
7000 /* Setup the status and RCC (Frame Size) fields of the 1st */
7001 /* buffer entry in the transmit DMA buffer list. */
7002
7003 info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
7004 info->tx_buffer_list[i].rcc = BufferSize;
7005 info->tx_buffer_list[i].count = BufferSize;
7006
7007 /* Copy frame data from 1st source buffer to the DMA buffers. */
7008 /* The frame data may span multiple DMA buffers. */
7009
7010 while( BufferSize ){
7011 /* Get a pointer to next DMA buffer entry. */
7012 pBufEntry = &info->tx_buffer_list[i++];
7013
7014 if ( i == info->tx_buffer_count )
7015 i=0;
7016
7017 /* Calculate the number of bytes that can be copied from */
7018 /* the source buffer to this DMA buffer. */
7019 if ( BufferSize > DMABUFFERSIZE )
7020 Copycount = DMABUFFERSIZE;
7021 else
7022 Copycount = BufferSize;
7023
7024 /* Actually copy data from source buffer to DMA buffer. */
7025 /* Also set the data count for this individual DMA buffer. */
7026 if ( info->bus_type == MGSL_BUS_TYPE_PCI )
7027 mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
7028 else
7029 memcpy(pBufEntry->virt_addr, Buffer, Copycount);
7030
7031 pBufEntry->count = Copycount;
7032
7033 /* Advance source pointer and reduce remaining data count. */
7034 Buffer += Copycount;
7035 BufferSize -= Copycount;
7036
7037 ++info->tx_dma_buffers_used;
7038 }
7039
7040 /* remember next available tx dma buffer */
7041 info->current_tx_buffer = i;
7042
7043} /* end of mgsl_load_tx_dma_buffer() */
7044
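/*
 * Illustrative sketch only (hypothetical helper): how many DMA buffer
 * entries the copy loop in mgsl_load_tx_dma_buffer() consumes for a
 * frame of BufferSize bytes, i.e. the frame split into DMABUFFERSIZE
 * sized pieces, rounded up.
 */
static unsigned int example_tx_buffers_needed( unsigned int BufferSize )
{
	/* each entry holds at most DMABUFFERSIZE bytes of frame data */
	return (BufferSize + DMABUFFERSIZE - 1) / DMABUFFERSIZE;
}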
7045/*
7046 * mgsl_register_test()
7047 *
7048 * Performs a register test of the 16C32.
7049 *
7050 * Arguments: info pointer to device instance data
7051 * Return Value: TRUE if test passed, otherwise FALSE
7052 */
7053static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
7054{
7055 static unsigned short BitPatterns[] =
7056 { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
7057 static unsigned int Patterncount = sizeof(BitPatterns)/sizeof(unsigned short);
7058 unsigned int i;
7059 BOOLEAN rc = TRUE;
7060 unsigned long flags;
7061
7062 spin_lock_irqsave(&info->irq_spinlock,flags);
7063 usc_reset(info);
7064
7065 /* Verify the reset state of some registers. */
7066
7067 if ( (usc_InReg( info, SICR ) != 0) ||
7068 (usc_InReg( info, IVR ) != 0) ||
7069 (usc_InDmaReg( info, DIVR ) != 0) ){
7070 rc = FALSE;
7071 }
7072
7073 if ( rc == TRUE ){
7074 /* Write bit patterns to various registers but do it out of */
7075 /* sync, then read back and verify values. */
7076
7077 for ( i = 0 ; i < Patterncount ; i++ ) {
7078 usc_OutReg( info, TC0R, BitPatterns[i] );
7079 usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
7080 usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
7081 usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
7082 usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
7083 usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
7084
7085 if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
7086 (usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
7087 (usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
7088 (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
7089 (usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
7090 (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
7091 rc = FALSE;
7092 break;
7093 }
7094 }
7095 }
7096
7097 usc_reset(info);
7098 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7099
7100 return rc;
7101
7102} /* end of mgsl_register_test() */
7103
7104/* mgsl_irq_test() Perform interrupt test of the 16C32.
7105 *
7106 * Arguments: info pointer to device instance data
7107 * Return Value: TRUE if test passed, otherwise FALSE
7108 */
7109static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
7110{
7111 unsigned long EndTime;
7112 unsigned long flags;
7113
7114 spin_lock_irqsave(&info->irq_spinlock,flags);
7115 usc_reset(info);
7116
7117 /*
7118 * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
7119 * The ISR sets irq_occurred to 1.
7120 */
7121
7122 info->irq_occurred = FALSE;
7123
7124	/* Enable the INTEN gate (Port 6, Bit12). */
7125	/* On the ISA adapter this connects the IRQ request signal */
7126	/* to the ISA bus. */
7127	/* It has no effect for the PCI adapter. */
7128 usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
7129
7130 usc_EnableMasterIrqBit(info);
7131 usc_EnableInterrupts(info, IO_PIN);
7132 usc_ClearIrqPendingBits(info, IO_PIN);
7133
7134 usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
7135 usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
7136
7137 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7138
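	/* Poll for the expected IO pin interrupt: up to 100 iterations of
	 * roughly 10ms each (about one second total) before giving up.
	 */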
7139 EndTime=100;
7140 while( EndTime-- && !info->irq_occurred ) {
7141 msleep_interruptible(10);
7142 }
7143
7144 spin_lock_irqsave(&info->irq_spinlock,flags);
7145 usc_reset(info);
7146 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7147
7148 if ( !info->irq_occurred )
7149 return FALSE;
7150 else
7151 return TRUE;
7152
7153} /* end of mgsl_irq_test() */
7154
7155/* mgsl_dma_test()
7156 *
7157 * Perform a DMA test of the 16C32. A small frame is
7158 * transmitted via DMA from a transmit buffer to a receive buffer
7159 * using single buffer DMA mode.
7160 *
7161 * Arguments: info pointer to device instance data
7162 * Return Value: TRUE if test passed, otherwise FALSE
7163 */
7164static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
7165{
7166 unsigned short FifoLevel;
7167 unsigned long phys_addr;
7168 unsigned int FrameSize;
7169 unsigned int i;
7170 char *TmpPtr;
7171 BOOLEAN rc = TRUE;
7172 unsigned short status=0;
7173 unsigned long EndTime;
7174 unsigned long flags;
7175 MGSL_PARAMS tmp_params;
7176
7177 /* save current port options */
7178 memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
7179 /* load default port options */
7180 memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
7181
7182#define TESTFRAMESIZE 40
7183
7184 spin_lock_irqsave(&info->irq_spinlock,flags);
7185
7186 /* setup 16C32 for SDLC DMA transfer mode */
7187
7188 usc_reset(info);
7189 usc_set_sdlc_mode(info);
7190 usc_enable_loopback(info,1);
7191
7192 /* Reprogram the RDMR so that the 16C32 does NOT clear the count
7193 * field of the buffer entry after fetching buffer address. This
7194 * way we can detect a DMA failure for a DMA read (which should be
7195	 * non-destructive to system memory) before we try to write to
7196 * memory (where a failure could corrupt system memory).
7197 */
7198
7199 /* Receive DMA mode Register (RDMR)
7200 *
7201 * <15..14> 11 DMA mode = Linked List Buffer mode
7202 * <13> 1 RSBinA/L = store Rx status Block in List entry
7203	 * <12>		0	(1 = Clear count of List Entry after fetching)
7204 * <11..10> 00 Address mode = Increment
7205 * <9> 1 Terminate Buffer on RxBound
7206 * <8> 0 Bus Width = 16bits
7207 * <7..0> ? status Bits (write as 0s)
7208 *
7209 * 1110 0010 0000 0000 = 0xe200
7210 */
7211
7212 usc_OutDmaReg( info, RDMR, 0xe200 );
7213
7214 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7215
7216
7217 /* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
7218
7219 FrameSize = TESTFRAMESIZE;
7220
7221 /* setup 1st transmit buffer entry: */
7222 /* with frame size and transmit control word */
7223
7224 info->tx_buffer_list[0].count = FrameSize;
7225 info->tx_buffer_list[0].rcc = FrameSize;
7226 info->tx_buffer_list[0].status = 0x4000;
7227
7228 /* build a transmit frame in 1st transmit DMA buffer */
7229
7230 TmpPtr = info->tx_buffer_list[0].virt_addr;
7231 for (i = 0; i < FrameSize; i++ )
7232 *TmpPtr++ = i;
7233
7234 /* setup 1st receive buffer entry: */
7235 /* clear status, set max receive buffer size */
7236
7237 info->rx_buffer_list[0].status = 0;
7238 info->rx_buffer_list[0].count = FrameSize + 4;
7239
7240 /* zero out the 1st receive buffer */
7241
7242 memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
7243
7244 /* Set count field of next buffer entries to prevent */
7245 /* 16C32 from using buffers after the 1st one. */
7246
7247 info->tx_buffer_list[1].count = 0;
7248 info->rx_buffer_list[1].count = 0;
7249
7250
7251 /***************************/
7252 /* Program 16C32 receiver. */
7253 /***************************/
7254
7255 spin_lock_irqsave(&info->irq_spinlock,flags);
7256
7257 /* setup DMA transfers */
7258 usc_RTCmd( info, RTCmd_PurgeRxFifo );
7259
7260 /* program 16C32 receiver with physical address of 1st DMA buffer entry */
7261 phys_addr = info->rx_buffer_list[0].phys_entry;
7262 usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
7263 usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
7264
7265 /* Clear the Rx DMA status bits (read RDMR) and start channel */
7266 usc_InDmaReg( info, RDMR );
7267 usc_DmaCmd( info, DmaCmd_InitRxChannel );
7268
7269 /* Enable Receiver (RMR <1..0> = 10) */
7270 usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
7271
7272 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7273
7274
7275 /*************************************************************/
7276 /* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
7277 /*************************************************************/
7278
7279 /* Wait 100ms for interrupt. */
7280 EndTime = jiffies + msecs_to_jiffies(100);
7281
7282 for(;;) {
7283 if (time_after(jiffies, EndTime)) {
7284 rc = FALSE;
7285 break;
7286 }
7287
7288 spin_lock_irqsave(&info->irq_spinlock,flags);
7289 status = usc_InDmaReg( info, RDMR );
7290 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7291
7292 if ( !(status & BIT4) && (status & BIT5) ) {
7293 /* INITG (BIT 4) is inactive (no entry read in progress) AND */
7294 /* BUSY (BIT 5) is active (channel still active). */
7295 /* This means the buffer entry read has completed. */
7296 break;
7297 }
7298 }
7299
7300
7301 /******************************/
7302 /* Program 16C32 transmitter. */
7303 /******************************/
7304
7305 spin_lock_irqsave(&info->irq_spinlock,flags);
7306
7307 /* Program the Transmit Character Length Register (TCLR) */
7308 /* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
7309
7310 usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
7311 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7312
7313 /* Program the address of the 1st DMA Buffer Entry in linked list */
7314
7315 phys_addr = info->tx_buffer_list[0].phys_entry;
7316 usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
7317 usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
7318
7319 /* unlatch Tx status bits, and start transmit channel. */
7320
7321 usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
7322 usc_DmaCmd( info, DmaCmd_InitTxChannel );
7323
7324 /* wait for DMA controller to fill transmit FIFO */
7325
7326 usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
7327
7328 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7329
7330
7331 /**********************************/
7332 /* WAIT FOR TRANSMIT FIFO TO FILL */
7333 /**********************************/
7334
7335 /* Wait 100ms */
7336 EndTime = jiffies + msecs_to_jiffies(100);
7337
7338 for(;;) {
7339 if (time_after(jiffies, EndTime)) {
7340 rc = FALSE;
7341 break;
7342 }
7343
7344 spin_lock_irqsave(&info->irq_spinlock,flags);
7345 FifoLevel = usc_InReg(info, TICR) >> 8;
7346 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7347
7348 if ( FifoLevel < 16 )
7349 break;
7350 else
7351 if ( FrameSize < 32 ) {
7352 /* This frame is smaller than the entire transmit FIFO */
7353 /* so wait for the entire frame to be loaded. */
7354 if ( FifoLevel <= (32 - FrameSize) )
7355 break;
7356 }
7357 }
7358
7359
7360 if ( rc == TRUE )
7361 {
7362 /* Enable 16C32 transmitter. */
7363
7364 spin_lock_irqsave(&info->irq_spinlock,flags);
7365
7366 /* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
7367 usc_TCmd( info, TCmd_SendFrame );
7368 usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
7369
7370 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7371
7372
7373 /******************************/
7374 /* WAIT FOR TRANSMIT COMPLETE */
7375 /******************************/
7376
7377 /* Wait 100ms */
7378 EndTime = jiffies + msecs_to_jiffies(100);
7379
7380 /* While timer not expired wait for transmit complete */
7381
7382 spin_lock_irqsave(&info->irq_spinlock,flags);
7383 status = usc_InReg( info, TCSR );
7384 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7385
7386 while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
7387 if (time_after(jiffies, EndTime)) {
7388 rc = FALSE;
7389 break;
7390 }
7391
7392 spin_lock_irqsave(&info->irq_spinlock,flags);
7393 status = usc_InReg( info, TCSR );
7394 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7395 }
7396 }
7397
7398
7399 if ( rc == TRUE ){
7400 /* CHECK FOR TRANSMIT ERRORS */
7401 if ( status & (BIT5 + BIT1) )
7402 rc = FALSE;
7403 }
7404
7405 if ( rc == TRUE ) {
7406 /* WAIT FOR RECEIVE COMPLETE */
7407
7408 /* Wait 100ms */
7409 EndTime = jiffies + msecs_to_jiffies(100);
7410
7411 /* Wait for 16C32 to write receive status to buffer entry. */
7412 status=info->rx_buffer_list[0].status;
7413 while ( status == 0 ) {
7414 if (time_after(jiffies, EndTime)) {
7415 rc = FALSE;
7416 break;
7417 }
7418 status=info->rx_buffer_list[0].status;
7419 }
7420 }
7421
7422
7423 if ( rc == TRUE ) {
7424 /* CHECK FOR RECEIVE ERRORS */
7425 status = info->rx_buffer_list[0].status;
7426
7427 if ( status & (BIT8 + BIT3 + BIT1) ) {
7428 /* receive error has occurred */
7429 rc = FALSE;
7430 } else {
7431 if ( memcmp( info->tx_buffer_list[0].virt_addr ,
7432 info->rx_buffer_list[0].virt_addr, FrameSize ) ){
7433 rc = FALSE;
7434 }
7435 }
7436 }
7437
7438 spin_lock_irqsave(&info->irq_spinlock,flags);
7439 usc_reset( info );
7440 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7441
7442 /* restore current port options */
7443 memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
7444
7445 return rc;
7446
7447} /* end of mgsl_dma_test() */
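/*
 * Illustrative sketch (not part of the original driver): mgsl_dma_test()
 * repeats the same bounded-wait idiom several times, i.e. compute a jiffies
 * deadline with msecs_to_jiffies() and poll hardware state until either the
 * condition is met or time_after() says the deadline has passed. A minimal
 * restatement of that pattern, with a hypothetical condition_met()
 * predicate, kept out of the build with #if 0:
 */
#if 0
static BOOLEAN wait_example(struct mgsl_struct *info)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(100);

	while (!condition_met(info)) {	/* hypothetical predicate */
		if (time_after(jiffies, deadline))
			return FALSE;	/* timed out */
	}
	return TRUE;
}
#endif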
7448
7449/* mgsl_adapter_test()
7450 *
7451 * Perform the register, IRQ, and DMA tests for the 16C32.
7452 *
7453 * Arguments: info pointer to device instance data
7454 * Return Value: 0 if success, otherwise -ENODEV
7455 */
7456static int mgsl_adapter_test( struct mgsl_struct *info )
7457{
7458 if ( debug_level >= DEBUG_LEVEL_INFO )
7459 printk( "%s(%d):Testing device %s\n",
7460 __FILE__,__LINE__,info->device_name );
7461
7462 if ( !mgsl_register_test( info ) ) {
7463 info->init_error = DiagStatus_AddressFailure;
7464 printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
7465 __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
7466 return -ENODEV;
7467 }
7468
7469 if ( !mgsl_irq_test( info ) ) {
7470 info->init_error = DiagStatus_IrqFailure;
7471 printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
7472 __FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
7473 return -ENODEV;
7474 }
7475
7476 if ( !mgsl_dma_test( info ) ) {
7477 info->init_error = DiagStatus_DmaFailure;
7478 printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
7479 __FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
7480 return -ENODEV;
7481 }
7482
7483 if ( debug_level >= DEBUG_LEVEL_INFO )
7484 printk( "%s(%d):device %s passed diagnostics\n",
7485 __FILE__,__LINE__,info->device_name );
7486
7487 return 0;
7488
7489} /* end of mgsl_adapter_test() */
7490
7491/* mgsl_memory_test()
7492 *
7493 * Test the shared memory on a PCI adapter.
7494 *
7495 * Arguments: info pointer to device instance data
7496 * Return Value: TRUE if test passed, otherwise FALSE
7497 */
7498static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
7499{
7500 static unsigned long BitPatterns[] = { 0x0, 0x55555555, 0xaaaaaaaa,
7501 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
7502 unsigned long Patterncount = sizeof(BitPatterns)/sizeof(unsigned long);
7503 unsigned long i;
7504 unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
7505 unsigned long * TestAddr;
7506
7507 if ( info->bus_type != MGSL_BUS_TYPE_PCI )
7508 return TRUE;
7509
7510 TestAddr = (unsigned long *)info->memory_base;
7511
7512 /* Test data lines with test pattern at one location. */
7513
7514 for ( i = 0 ; i < Patterncount ; i++ ) {
7515 *TestAddr = BitPatterns[i];
7516 if ( *TestAddr != BitPatterns[i] )
7517 return FALSE;
7518 }
7519
7520 /* Test address lines with incrementing pattern over */
7521 /* entire address range. */
7522
7523 for ( i = 0 ; i < TestLimit ; i++ ) {
7524 *TestAddr = i * 4;
7525 TestAddr++;
7526 }
7527
7528 TestAddr = (unsigned long *)info->memory_base;
7529
7530 for ( i = 0 ; i < TestLimit ; i++ ) {
7531 if ( *TestAddr != i * 4 )
7532 return FALSE;
7533 TestAddr++;
7534 }
7535
7536 memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
7537
7538 return TRUE;
7539
7540} /* End Of mgsl_memory_test() */
7541
7542
7543/* mgsl_load_pci_memory()
7544 *
7545 * Load a large block of data into the PCI shared memory.
7546 * Use this instead of memcpy() or memmove() to move data
7547 * into the PCI shared memory.
7548 *
7549 * Notes:
7550 *
7551 * This function prevents the PCI9050 interface chip from hogging
7552 * the adapter local bus, which can starve the 16C32 by preventing
7553 * 16C32 bus master cycles.
7554 *
7555 * The PCI9050 documentation says that the 9050 will always release
7556 * control of the local bus after completing the current read
7557 * or write operation.
7558 *
7559 * It appears that as long as the PCI9050 write FIFO is full, the
7560 * PCI9050 treats all of the writes as a single burst transaction
7561 * and will not release the bus. This causes DMA latency problems
7562 * at high speeds when copying large data blocks to the shared
7563 * memory.
7564 *
7565 * This function, in effect, breaks a large shared memory write
7566 * into multiple transactions by interleaving a shared memory read
7567 * which flushes the write FIFO and 'completes' the write
7568 * transaction. This allows any pending DMA request to gain control
7569 * of the local bus in a timely fashion.
7570 *
7571 * Arguments:
7572 *
7573 * TargetPtr pointer to target address in PCI shared memory
7574 * SourcePtr pointer to source buffer for data
7575 * count count in bytes of data to copy
7576 *
7577 * Return Value: None
7578 */
7579static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
7580 unsigned short count )
7581{
7582 /* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
7583#define PCI_LOAD_INTERVAL 64
7584
7585 unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
7586 unsigned short Index;
7587 unsigned long Dummy;
7588
7589 for ( Index = 0 ; Index < Intervalcount ; Index++ )
7590 {
7591 memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
7592 Dummy = *((volatile unsigned long *)TargetPtr);
7593 TargetPtr += PCI_LOAD_INTERVAL;
7594 SourcePtr += PCI_LOAD_INTERVAL;
7595 }
7596
7597 memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
7598
7599} /* End Of mgsl_load_pci_memory() */
7600
7601static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
7602{
7603 int i;
7604 int linecount;
7605 if (xmit)
7606 printk("%s tx data:\n",info->device_name);
7607 else
7608 printk("%s rx data:\n",info->device_name);
7609
7610 while(count) {
7611 if (count > 16)
7612 linecount = 16;
7613 else
7614 linecount = count;
7615
7616 for(i=0;i<linecount;i++)
7617 printk("%02X ",(unsigned char)data[i]);
7618 for(;i<17;i++)
7619 printk(" ");
7620 for(i=0;i<linecount;i++) {
7621 if (data[i]>=040 && data[i]<=0176)
7622 printk("%c",data[i]);
7623 else
7624 printk(".");
7625 }
7626 printk("\n");
7627
7628 data += linecount;
7629 count -= linecount;
7630 }
7631} /* end of mgsl_trace_block() */
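/*
 * Illustrative note (not part of the original driver): mgsl_trace_block()
 * dumps 16 bytes per line as hex followed by an ASCII column, with
 * non-printable bytes shown as '.'. A full line of output looks roughly
 * like:
 *
 *   48 44 4C 43 20 66 72 61 6D 65 00 01 02 03 04 05    HDLC frame......
 */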
7632
7633/* mgsl_tx_timeout()
7634 *
7635 * called when HDLC frame times out
7636 * update stats and do tx completion processing
7637 *
7638 * Arguments: context pointer to device instance data
7639 * Return Value: None
7640 */
7641static void mgsl_tx_timeout(unsigned long context)
7642{
7643 struct mgsl_struct *info = (struct mgsl_struct*)context;
7644 unsigned long flags;
7645
7646 if ( debug_level >= DEBUG_LEVEL_INFO )
7647 printk( "%s(%d):mgsl_tx_timeout(%s)\n",
7648 __FILE__,__LINE__,info->device_name);
7649 if(info->tx_active &&
7650 (info->params.mode == MGSL_MODE_HDLC ||
7651 info->params.mode == MGSL_MODE_RAW) ) {
7652 info->icount.txtimeout++;
7653 }
7654 spin_lock_irqsave(&info->irq_spinlock,flags);
7655 info->tx_active = 0;
7656 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
7657
7658 if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
7659 usc_loopmode_cancel_transmit( info );
7660
7661 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7662
7663#ifdef CONFIG_HDLC
7664 if (info->netcount)
7665 hdlcdev_tx_done(info);
7666 else
7667#endif
7668 mgsl_bh_transmit(info);
7669
7670} /* end of mgsl_tx_timeout() */
7671
7672/* signal that there are no more frames to send, so that the
7673 * line is 'released' by echoing RxD to TxD when the current
7674 * transmission is complete (or immediately if no tx is in progress).
7675 */
7676static int mgsl_loopmode_send_done( struct mgsl_struct * info )
7677{
7678 unsigned long flags;
7679
7680 spin_lock_irqsave(&info->irq_spinlock,flags);
7681 if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
7682 if (info->tx_active)
7683 info->loopmode_send_done_requested = TRUE;
7684 else
7685 usc_loopmode_send_done(info);
7686 }
7687 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7688
7689 return 0;
7690}
7691
7692/* release the line by echoing RxD to TxD
7693 * upon completion of a transmit frame
7694 */
7695static void usc_loopmode_send_done( struct mgsl_struct * info )
7696{
7697 info->loopmode_send_done_requested = FALSE;
7698 /* clear CMR:13 to 0 to start echoing RxData to TxData */
7699 info->cmr_value &= ~BIT13;
7700 usc_OutReg(info, CMR, info->cmr_value);
7701}
7702
7703/* abort a transmit in progress while in HDLC LoopMode
7704 */
7705static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
7706{
7707 /* reset tx dma channel and purge TxFifo */
7708 usc_RTCmd( info, RTCmd_PurgeTxFifo );
7709 usc_DmaCmd( info, DmaCmd_ResetTxChannel );
7710 usc_loopmode_send_done( info );
7711}
7712
7713/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
7714 * is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
7715 * we must clear CMR:13 to begin repeating TxData to RxData
7716 */
7717static void usc_loopmode_insert_request( struct mgsl_struct * info )
7718{
7719 info->loopmode_insert_requested = TRUE;
7720
7721 /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
7722 * begin repeating TxData on RxData (complete insertion)
7723 */
7724 usc_OutReg( info, RICR,
7725 (usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
7726
7727 /* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
7728 info->cmr_value |= BIT13;
7729 usc_OutReg(info, CMR, info->cmr_value);
7730}
7731
7732/* return 1 if station is inserted into the loop, otherwise 0
7733 */
7734static int usc_loopmode_active( struct mgsl_struct * info)
7735{
7736 return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
7737}
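/*
 * Summary (restating the helpers above, not new behavior): in HDLC loop
 * mode CMR:13 controls the station's participation in the loop. An insert
 * request sets CMR:13 and enables the RxAbort (GoAhead) status interrupt;
 * releasing the line (usc_loopmode_send_done) clears CMR:13 so RxD is
 * echoed to TxD; cancelling a transmit purges the tx FIFO, resets the tx
 * DMA channel and then releases the line; and usc_loopmode_active()
 * reports insertion status from CCSR bit 7.
 */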
7738
7739#ifdef CONFIG_HDLC
7740
7741/**
7742 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
7743 * set encoding and frame check sequence (FCS) options
7744 *
7745 * dev pointer to network device structure
7746 * encoding serial encoding setting
7747 * parity FCS setting
7748 *
7749 * returns 0 if success, otherwise error code
7750 */
7751static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
7752 unsigned short parity)
7753{
7754 struct mgsl_struct *info = dev_to_port(dev);
7755 unsigned char new_encoding;
7756 unsigned short new_crctype;
7757
7758 /* return error if TTY interface open */
7759 if (info->count)
7760 return -EBUSY;
7761
7762 switch (encoding)
7763 {
7764 case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
7765 case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
7766 case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
7767 case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
7768 case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
7769 default: return -EINVAL;
7770 }
7771
7772 switch (parity)
7773 {
7774 case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
7775 case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
7776 case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
7777 default: return -EINVAL;
7778 }
7779
7780 info->params.encoding = new_encoding;
7781	info->params.crc_type = new_crctype;
7782
7783 /* if network interface up, reprogram hardware */
7784 if (info->netcount)
7785 mgsl_program_hw(info);
7786
7787 return 0;
7788}
7789
7790/**
7791 * called by generic HDLC layer to send frame
7792 *
7793 * skb socket buffer containing HDLC frame
7794 * dev pointer to network device structure
7795 *
7796 * returns 0 if success, otherwise error code
7797 */
7798static int hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev)
7799{
7800 struct mgsl_struct *info = dev_to_port(dev);
7801 struct net_device_stats *stats = hdlc_stats(dev);
7802 unsigned long flags;
7803
7804 if (debug_level >= DEBUG_LEVEL_INFO)
7805		printk(KERN_INFO "%s:hdlcdev_xmit(%s)\n",__FILE__,dev->name);
7806
7807 /* stop sending until this frame completes */
7808 netif_stop_queue(dev);
7809
7810 /* copy data to device buffers */
7811 info->xmit_cnt = skb->len;
7812 mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
7813
7814 /* update network statistics */
7815 stats->tx_packets++;
7816 stats->tx_bytes += skb->len;
7817
7818 /* done with socket buffer, so free it */
7819 dev_kfree_skb(skb);
7820
7821 /* save start time for transmit timeout detection */
7822 dev->trans_start = jiffies;
7823
7824 /* start hardware transmitter if necessary */
7825 spin_lock_irqsave(&info->irq_spinlock,flags);
7826 if (!info->tx_active)
7827 usc_start_transmitter(info);
7828 spin_unlock_irqrestore(&info->irq_spinlock,flags);
7829
7830 return 0;
7831}
7832
7833/**
7834 * called by network layer when interface enabled
7835 * claim resources and initialize hardware
7836 *
7837 * dev pointer to network device structure
7838 *
7839 * returns 0 if success, otherwise error code
7840 */
7841static int hdlcdev_open(struct net_device *dev)
7842{
7843 struct mgsl_struct *info = dev_to_port(dev);
7844 int rc;
7845 unsigned long flags;
7846
7847 if (debug_level >= DEBUG_LEVEL_INFO)
7848 printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
7849
7850 /* generic HDLC layer open processing */
7851 if ((rc = hdlc_open(dev)))
7852 return rc;
7853
7854 /* arbitrate between network and tty opens */
7855 spin_lock_irqsave(&info->netlock, flags);
7856 if (info->count != 0 || info->netcount != 0) {
7857		printk(KERN_WARNING "%s: hdlcdev_open returning busy\n", dev->name);
7858 spin_unlock_irqrestore(&info->netlock, flags);
7859 return -EBUSY;
7860 }
7861 info->netcount=1;
7862 spin_unlock_irqrestore(&info->netlock, flags);
7863
7864 /* claim resources and init adapter */
7865 if ((rc = startup(info)) != 0) {
7866 spin_lock_irqsave(&info->netlock, flags);
7867 info->netcount=0;
7868 spin_unlock_irqrestore(&info->netlock, flags);
7869 return rc;
7870 }
7871
7872 /* assert DTR and RTS, apply hardware settings */
7873 info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
7874 mgsl_program_hw(info);
7875
7876 /* enable network layer transmit */
7877 dev->trans_start = jiffies;
7878 netif_start_queue(dev);
7879
7880 /* inform generic HDLC layer of current DCD status */
7881 spin_lock_irqsave(&info->irq_spinlock, flags);
7882 usc_get_serial_signals(info);
7883 spin_unlock_irqrestore(&info->irq_spinlock, flags);
7884 hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
7885
7886 return 0;
7887}
7888
7889/**
7890 * called by network layer when interface is disabled
7891 * shutdown hardware and release resources
7892 *
7893 * dev pointer to network device structure
7894 *
7895 * returns 0 if success, otherwise error code
7896 */
7897static int hdlcdev_close(struct net_device *dev)
7898{
7899 struct mgsl_struct *info = dev_to_port(dev);
7900 unsigned long flags;
7901
7902 if (debug_level >= DEBUG_LEVEL_INFO)
7903 printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
7904
7905 netif_stop_queue(dev);
7906
7907 /* shutdown adapter and release resources */
7908 shutdown(info);
7909
7910 hdlc_close(dev);
7911
7912 spin_lock_irqsave(&info->netlock, flags);
7913 info->netcount=0;
7914 spin_unlock_irqrestore(&info->netlock, flags);
7915
7916 return 0;
7917}
7918
7919/**
7920 * called by network layer to process IOCTL call to network device
7921 *
7922 * dev pointer to network device structure
7923 * ifr pointer to network interface request structure
7924 * cmd IOCTL command code
7925 *
7926 * returns 0 if success, otherwise error code
7927 */
7928static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7929{
7930 const size_t size = sizeof(sync_serial_settings);
7931 sync_serial_settings new_line;
7932 sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
7933 struct mgsl_struct *info = dev_to_port(dev);
7934 unsigned int flags;
7935
7936 if (debug_level >= DEBUG_LEVEL_INFO)
7937 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
7938
7939 /* return error if TTY interface open */
7940 if (info->count)
7941 return -EBUSY;
7942
7943 if (cmd != SIOCWANDEV)
7944 return hdlc_ioctl(dev, ifr, cmd);
7945
7946 switch(ifr->ifr_settings.type) {
7947 case IF_GET_IFACE: /* return current sync_serial_settings */
7948
7949 ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
7950 if (ifr->ifr_settings.size < size) {
7951 ifr->ifr_settings.size = size; /* data size wanted */
7952 return -ENOBUFS;
7953 }
7954
7955 flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7956 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7957 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7958 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
7959
7960 switch (flags){
7961 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
7962 case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
7963 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
7964 case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
7965 default: new_line.clock_type = CLOCK_DEFAULT;
7966 }
7967
7968 new_line.clock_rate = info->params.clock_speed;
7969 new_line.loopback = info->params.loopback ? 1:0;
7970
7971 if (copy_to_user(line, &new_line, size))
7972 return -EFAULT;
7973 return 0;
7974
7975 case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
7976
7977 if(!capable(CAP_NET_ADMIN))
7978 return -EPERM;
7979 if (copy_from_user(&new_line, line, size))
7980 return -EFAULT;
7981
7982 switch (new_line.clock_type)
7983 {
7984 case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
7985 case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
7986 case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
7987 case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
7988 case CLOCK_DEFAULT: flags = info->params.flags &
7989 (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
7990 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
7991 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
7992 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
7993 default: return -EINVAL;
7994 }
7995
7996 if (new_line.loopback != 0 && new_line.loopback != 1)
7997 return -EINVAL;
7998
7999 info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
8000 HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
8001 HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
8002 HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
8003 info->params.flags |= flags;
8004
8005 info->params.loopback = new_line.loopback;
8006
8007 if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
8008 info->params.clock_speed = new_line.clock_rate;
8009 else
8010 info->params.clock_speed = 0;
8011
8012 /* if network interface up, reprogram hardware */
8013 if (info->netcount)
8014 mgsl_program_hw(info);
8015 return 0;
8016
8017 default:
8018 return hdlc_ioctl(dev, ifr, cmd);
8019 }
8020}
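/*
 * Illustrative user-space sketch (not part of the driver): querying the
 * current sync_serial_settings through the SIOCWANDEV ioctl handled above.
 * The interface name "hdlc0" is an assumption, error handling is omitted,
 * and header locations may differ between kernel versions; the block is
 * kept out of the build with #if 0.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>
#include <linux/sockios.h>

int get_sync_settings(void)
{
	sync_serial_settings settings;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "hdlc0", sizeof(ifr.ifr_name));
	ifr.ifr_settings.type = IF_GET_IFACE;
	ifr.ifr_settings.size = sizeof(settings);
	ifr.ifr_settings.ifs_ifsu.sync = &settings;

	return ioctl(fd, SIOCWANDEV, &ifr);	/* 0 on success */
}
#endif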
8021
8022/**
8023 * called by network layer when transmit timeout is detected
8024 *
8025 * dev pointer to network device structure
8026 */
8027static void hdlcdev_tx_timeout(struct net_device *dev)
8028{
8029 struct mgsl_struct *info = dev_to_port(dev);
8030 struct net_device_stats *stats = hdlc_stats(dev);
8031 unsigned long flags;
8032
8033 if (debug_level >= DEBUG_LEVEL_INFO)
8034 printk("hdlcdev_tx_timeout(%s)\n",dev->name);
8035
8036 stats->tx_errors++;
8037 stats->tx_aborted_errors++;
8038
8039 spin_lock_irqsave(&info->irq_spinlock,flags);
8040 usc_stop_transmitter(info);
8041 spin_unlock_irqrestore(&info->irq_spinlock,flags);
8042
8043 netif_wake_queue(dev);
8044}
8045
8046/**
8047 * called by device driver when transmit completes
8048 * reenable network layer transmit if stopped
8049 *
8050 * info pointer to device instance information
8051 */
8052static void hdlcdev_tx_done(struct mgsl_struct *info)
8053{
8054 if (netif_queue_stopped(info->netdev))
8055 netif_wake_queue(info->netdev);
8056}
8057
8058/**
8059 * called by device driver when frame received
8060 * pass frame to network layer
8061 *
8062 * info pointer to device instance information
8063 * buf  pointer to buffer containing frame data
8064 * size count of data bytes in buf
8065 */
8066static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
8067{
8068 struct sk_buff *skb = dev_alloc_skb(size);
8069 struct net_device *dev = info->netdev;
8070 struct net_device_stats *stats = hdlc_stats(dev);
8071
8072 if (debug_level >= DEBUG_LEVEL_INFO)
8073 printk("hdlcdev_rx(%s)\n",dev->name);
8074
8075 if (skb == NULL) {
8076 printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
8077 stats->rx_dropped++;
8078 return;
8079 }
8080
8081 memcpy(skb_put(skb, size),buf,size);
8082
8083 skb->protocol = hdlc_type_trans(skb, info->netdev);
8084
8085 stats->rx_packets++;
8086 stats->rx_bytes += size;
8087
8088 netif_rx(skb);
8089
8090 info->netdev->last_rx = jiffies;
8091}
8092
8093/**
8094 * called by device driver when adding device instance
8095 * do generic HDLC initialization
8096 *
8097 * info pointer to device instance information
8098 *
8099 * returns 0 if success, otherwise error code
8100 */
8101static int hdlcdev_init(struct mgsl_struct *info)
8102{
8103 int rc;
8104 struct net_device *dev;
8105 hdlc_device *hdlc;
8106
8107 /* allocate and initialize network and HDLC layer objects */
8108
8109 if (!(dev = alloc_hdlcdev(info))) {
8110 printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
8111 return -ENOMEM;
8112 }
8113
8114 /* for network layer reporting purposes only */
8115 dev->base_addr = info->io_base;
8116 dev->irq = info->irq_level;
8117 dev->dma = info->dma_level;
8118
8119 /* network layer callbacks and settings */
8120 dev->do_ioctl = hdlcdev_ioctl;
8121 dev->open = hdlcdev_open;
8122 dev->stop = hdlcdev_close;
8123 dev->tx_timeout = hdlcdev_tx_timeout;
8124 dev->watchdog_timeo = 10*HZ;
8125 dev->tx_queue_len = 50;
8126
8127 /* generic HDLC layer callbacks and settings */
8128 hdlc = dev_to_hdlc(dev);
8129 hdlc->attach = hdlcdev_attach;
8130 hdlc->xmit = hdlcdev_xmit;
8131
8132 /* register objects with HDLC layer */
8133 if ((rc = register_hdlc_device(dev))) {
8134 printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
8135 free_netdev(dev);
8136 return rc;
8137 }
8138
8139 info->netdev = dev;
8140 return 0;
8141}
8142
8143/**
8144 * called by device driver when removing device instance
8145 * do generic HDLC cleanup
8146 *
8147 * info pointer to device instance information
8148 */
8149static void hdlcdev_exit(struct mgsl_struct *info)
8150{
8151 unregister_hdlc_device(info->netdev);
8152 free_netdev(info->netdev);
8153 info->netdev = NULL;
8154}
8155
8156#endif /* CONFIG_HDLC */
8157
8158
8159static int __devinit synclink_init_one (struct pci_dev *dev,
8160 const struct pci_device_id *ent)
8161{
8162 struct mgsl_struct *info;
8163
8164 if (pci_enable_device(dev)) {
8165 printk("error enabling pci device %p\n", dev);
8166 return -EIO;
8167 }
8168
8169 if (!(info = mgsl_allocate_device())) {
8170 printk("can't allocate device instance data.\n");
8171 return -EIO;
8172 }
8173
8174 /* Copy user configuration info to device instance data */
8175
8176 info->io_base = pci_resource_start(dev, 2);
8177 info->irq_level = dev->irq;
8178 info->phys_memory_base = pci_resource_start(dev, 3);
8179
8180	/* Because ioremap only works on page boundaries, we must map
8181 * a larger area than is actually implemented for the LCR
8182 * memory range. We map a full page starting at the page boundary.
8183 */
8184 info->phys_lcr_base = pci_resource_start(dev, 0);
8185 info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
8186 info->phys_lcr_base &= ~(PAGE_SIZE-1);
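	/* For example, if pci_resource_start() returned 0xffa01080 and
	 * PAGE_SIZE were 4096, lcr_offset would be 0x080 and phys_lcr_base
	 * would round down to 0xffa01000 (hypothetical values).
	 */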
8187
8188 info->bus_type = MGSL_BUS_TYPE_PCI;
8189 info->io_addr_size = 8;
8190 info->irq_flags = SA_SHIRQ;
8191
8192 if (dev->device == 0x0210) {
8193 /* Version 1 PCI9030 based universal PCI adapter */
8194 info->misc_ctrl_value = 0x007c4080;
8195 info->hw_version = 1;
8196 } else {
8197 /* Version 0 PCI9050 based 5V PCI adapter
8198 * A PCI9050 bug prevents reading LCR registers if
8199 * LCR base address bit 7 is set. Maintain shadow
8200 * value so we can write to LCR misc control reg.
8201 */
8202 info->misc_ctrl_value = 0x087e4546;
8203 info->hw_version = 0;
8204 }
8205
8206 mgsl_add_device(info);
8207
8208 return 0;
8209}
8210
8211static void __devexit synclink_remove_one (struct pci_dev *dev)
8212{
8213}
8214