author     Jesper Nilsson <jespern@stork.se.axis.com>   2007-11-29 11:30:24 -0500
committer  Jesper Nilsson <jesper.nilsson@axis.com>     2008-02-08 05:06:23 -0500
commit     e908dfc3c08d684b115f6fbd3740c6b77e0ddaf8 (patch)
tree       66ebb2197da4f1b4ab217c6dedde72430da5ea17 /arch/cris
parent     ca91d5b098700570f308dea0b228829fd4c37f14 (diff)
CRIS v32: Update synchronous serial driver.
Now uses a DMA descriptor ring, which should avoid any unnecessary
pauses in the streams.
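The patch replaces the single static output descriptor with a ring whose end-of-list (eol) marker is moved forward each time a new buffer is queued, so the DMA engine can run from one buffer straight into the next. The sketch below is a minimal, self-contained model of that scheme; the names (tx_descr, ring_queue, ring_complete) are hypothetical and are not the driver's real dma_descr_data API.

#include <stddef.h>

#define NBR_OUT_DESCR 8

struct tx_descr {
	const char *buf;       /* first byte of the queued chunk */
	const char *after;     /* one past the last byte of the chunk */
	int eol;               /* set on the descriptor that currently ends the list */
	struct tx_descr *next; /* next descriptor in the ring */
};

static struct tx_descr ring[NBR_OUT_DESCR];
static struct tx_descr *active_descr; /* next free descriptor */
static struct tx_descr *prev_descr;   /* descriptor holding the current eol */
static struct tx_descr *catch_descr;  /* oldest descriptor still owned by the DMA */

static void ring_init(void)
{
	int i;

	for (i = 0; i < NBR_OUT_DESCR; i++) {
		ring[i].eol = 0;
		ring[i].next = &ring[(i + 1) % NBR_OUT_DESCR];
	}
	active_descr = &ring[0];
	prev_descr = &ring[NBR_OUT_DESCR - 1];
	catch_descr = &ring[0];
}

/*
 * Queue one chunk: fill the next free descriptor, flag it as the new
 * end-of-list, then clear eol on the old tail so a running DMA engine
 * continues into the new descriptor without pausing.
 */
static int ring_queue(const char *data, size_t count)
{
	if (active_descr == catch_descr && prev_descr->eol)
		return -1;              /* ring full, caller must wait */

	active_descr->buf = data;
	active_descr->after = data + count;
	active_descr->eol = 1;          /* new tail */
	prev_descr->eol = 0;            /* old tail no longer ends the list */

	prev_descr = active_descr;
	active_descr = active_descr->next;
	return 0;
}

/* Called from the (hypothetical) transmit-done interrupt: hand the
 * oldest descriptor back to the writer once the DMA has consumed it. */
static void ring_complete(void)
{
	catch_descr = catch_descr->next;
}

In the patch itself the same roles are played by active_tr_descr, prev_tr_descr and catch_tr_descr, with the descriptors linked through physical addresses and the eol flag read by the DMA hardware.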
Diffstat (limited to 'arch/cris')
-rw-r--r--   arch/cris/arch-v32/drivers/sync_serial.c   940
1 file changed, 599 insertions, 341 deletions
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index d581b0a92a3f..eddb98707347 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Simple synchronous serial port driver for ETRAX FS. | 2 | * Simple synchronous serial port driver for ETRAX FS and Artpec-3. |
3 | * | 3 | * |
4 | * Copyright (c) 2005 Axis Communications AB | 4 | * Copyright (c) 2005 Axis Communications AB |
5 | * | 5 | * |
@@ -21,17 +21,18 @@ | |||
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | 22 | ||
23 | #include <asm/io.h> | 23 | #include <asm/io.h> |
24 | #include <asm/arch/dma.h> | 24 | #include <dma.h> |
25 | #include <asm/arch/pinmux.h> | 25 | #include <pinmux.h> |
26 | #include <asm/arch/hwregs/reg_rdwr.h> | 26 | #include <hwregs/reg_rdwr.h> |
27 | #include <asm/arch/hwregs/sser_defs.h> | 27 | #include <hwregs/sser_defs.h> |
28 | #include <asm/arch/hwregs/dma_defs.h> | 28 | #include <hwregs/dma_defs.h> |
29 | #include <asm/arch/hwregs/dma.h> | 29 | #include <hwregs/dma.h> |
30 | #include <asm/arch/hwregs/intr_vect_defs.h> | 30 | #include <hwregs/intr_vect_defs.h> |
31 | #include <asm/arch/hwregs/intr_vect.h> | 31 | #include <hwregs/intr_vect.h> |
32 | #include <asm/arch/hwregs/reg_map.h> | 32 | #include <hwregs/reg_map.h> |
33 | #include <asm/sync_serial.h> | 33 | #include <asm/sync_serial.h> |
34 | 34 | ||
35 | |||
35 | /* The receiver is a bit tricky beacuse of the continuous stream of data.*/ | 36 | /* The receiver is a bit tricky beacuse of the continuous stream of data.*/ |
36 | /* */ | 37 | /* */ |
37 | /* Three DMA descriptors are linked together. Each DMA descriptor is */ | 38 | /* Three DMA descriptors are linked together. Each DMA descriptor is */ |
@@ -63,8 +64,10 @@ | |||
63 | /* words can be handled */ | 64 | /* words can be handled */ |
64 | #define IN_BUFFER_SIZE 12288 | 65 | #define IN_BUFFER_SIZE 12288 |
65 | #define IN_DESCR_SIZE 256 | 66 | #define IN_DESCR_SIZE 256 |
66 | #define NUM_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE) | 67 | #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE) |
67 | #define OUT_BUFFER_SIZE 4096 | 68 | |
69 | #define OUT_BUFFER_SIZE 1024*8 | ||
70 | #define NBR_OUT_DESCR 8 | ||
68 | 71 | ||
69 | #define DEFAULT_FRAME_RATE 0 | 72 | #define DEFAULT_FRAME_RATE 0 |
70 | #define DEFAULT_WORD_RATE 7 | 73 | #define DEFAULT_WORD_RATE 7 |
@@ -78,6 +81,8 @@ | |||
78 | #define DEBUGPOLL(x) | 81 | #define DEBUGPOLL(x) |
79 | #define DEBUGRXINT(x) | 82 | #define DEBUGRXINT(x) |
80 | #define DEBUGTXINT(x) | 83 | #define DEBUGTXINT(x) |
84 | #define DEBUGTRDMA(x) | ||
85 | #define DEBUGOUTBUF(x) | ||
81 | 86 | ||
82 | typedef struct sync_port | 87 | typedef struct sync_port |
83 | { | 88 | { |
@@ -97,10 +102,11 @@ typedef struct sync_port | |||
97 | int output; | 102 | int output; |
98 | int input; | 103 | int input; |
99 | 104 | ||
100 | volatile unsigned int out_count; /* Remaining bytes for current transfer */ | 105 | /* Next byte to be read by application */ |
101 | unsigned char* outp; /* Current position in out_buffer */ | 106 | volatile unsigned char *volatile readp; |
102 | volatile unsigned char* volatile readp; /* Next byte to be read by application */ | 107 | /* Next byte to be written by etrax */ |
103 | volatile unsigned char* volatile writep; /* Next byte to be written by etrax */ | 108 | volatile unsigned char *volatile writep; |
109 | |||
104 | unsigned int in_buffer_size; | 110 | unsigned int in_buffer_size; |
105 | unsigned int inbufchunk; | 111 | unsigned int inbufchunk; |
106 | unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32))); | 112 | unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32))); |
@@ -108,11 +114,30 @@ typedef struct sync_port | |||
108 | unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32))); | 114 | unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32))); |
109 | struct dma_descr_data* next_rx_desc; | 115 | struct dma_descr_data* next_rx_desc; |
110 | struct dma_descr_data* prev_rx_desc; | 116 | struct dma_descr_data* prev_rx_desc; |
117 | |||
118 | /* Pointer to the first available descriptor in the ring, | ||
119 | * unless active_tr_descr == catch_tr_descr and a dma | ||
120 | * transfer is active */ | ||
121 | struct dma_descr_data *active_tr_descr; | ||
122 | |||
123 | /* Pointer to the first allocated descriptor in the ring */ | ||
124 | struct dma_descr_data *catch_tr_descr; | ||
125 | |||
126 | /* Pointer to the descriptor with the current end-of-list */ | ||
127 | struct dma_descr_data *prev_tr_descr; | ||
111 | int full; | 128 | int full; |
112 | 129 | ||
113 | dma_descr_data in_descr[NUM_IN_DESCR] __attribute__ ((__aligned__(16))); | 130 | /* Pointer to the first byte being read by DMA |
131 | * or current position in out_buffer if not using DMA. */ | ||
132 | unsigned char *out_rd_ptr; | ||
133 | |||
134 | /* Number of bytes currently locked for being read by DMA */ | ||
135 | int out_buf_count; | ||
136 | |||
137 | dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16))); | ||
114 | dma_descr_context in_context __attribute__ ((__aligned__(32))); | 138 | dma_descr_context in_context __attribute__ ((__aligned__(32))); |
115 | dma_descr_data out_descr __attribute__ ((__aligned__(16))); | 139 | dma_descr_data out_descr[NBR_OUT_DESCR] |
140 | __attribute__ ((__aligned__(16))); | ||
116 | dma_descr_context out_context __attribute__ ((__aligned__(32))); | 141 | dma_descr_context out_context __attribute__ ((__aligned__(32))); |
117 | wait_queue_head_t out_wait_q; | 142 | wait_queue_head_t out_wait_q; |
118 | wait_queue_head_t in_wait_q; | 143 | wait_queue_head_t in_wait_q; |
@@ -121,7 +146,9 @@ typedef struct sync_port | |||
121 | } sync_port; | 146 | } sync_port; |
122 | 147 | ||
123 | static int etrax_sync_serial_init(void); | 148 | static int etrax_sync_serial_init(void); |
149 | #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) | ||
124 | static void initialize_port(int portnbr); | 150 | static void initialize_port(int portnbr); |
151 | #endif | ||
125 | static inline int sync_data_avail(struct sync_port *port); | 152 | static inline int sync_data_avail(struct sync_port *port); |
126 | 153 | ||
127 | static int sync_serial_open(struct inode *, struct file*); | 154 | static int sync_serial_open(struct inode *, struct file*); |
@@ -143,11 +170,11 @@ static ssize_t sync_serial_read(struct file *file, char *buf, | |||
143 | #endif | 170 | #endif |
144 | 171 | ||
145 | static void send_word(sync_port* port); | 172 | static void send_word(sync_port* port); |
146 | static void start_dma(struct sync_port *port, const char* data, int count); | 173 | static void start_dma_out(struct sync_port *port, const char *data, int count); |
147 | static void start_dma_in(sync_port* port); | 174 | static void start_dma_in(sync_port* port); |
148 | #ifdef SYNC_SER_DMA | 175 | #ifdef SYNC_SER_DMA |
149 | static irqreturn_t tr_interrupt(int irq, void *dev_id, struct pt_regs * regs); | 176 | static irqreturn_t tr_interrupt(int irq, void *dev_id); |
150 | static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs); | 177 | static irqreturn_t rx_interrupt(int irq, void *dev_id); |
151 | #endif | 178 | #endif |
152 | 179 | ||
153 | #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ | 180 | #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \ |
@@ -157,22 +184,49 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs); | |||
157 | #define SYNC_SER_MANUAL | 184 | #define SYNC_SER_MANUAL |
158 | #endif | 185 | #endif |
159 | #ifdef SYNC_SER_MANUAL | 186 | #ifdef SYNC_SER_MANUAL |
160 | static irqreturn_t manual_interrupt(int irq, void *dev_id, struct pt_regs * regs); | 187 | static irqreturn_t manual_interrupt(int irq, void *dev_id); |
188 | #endif | ||
189 | |||
190 | #ifdef CONFIG_ETRAXFS /* ETRAX FS */ | ||
191 | #define OUT_DMA_NBR 4 | ||
192 | #define IN_DMA_NBR 5 | ||
193 | #define PINMUX_SSER pinmux_sser0 | ||
194 | #define SYNCSER_INST regi_sser0 | ||
195 | #define SYNCSER_INTR_VECT SSER0_INTR_VECT | ||
196 | #define OUT_DMA_INST regi_dma4 | ||
197 | #define IN_DMA_INST regi_dma5 | ||
198 | #define DMA_OUT_INTR_VECT DMA4_INTR_VECT | ||
199 | #define DMA_IN_INTR_VECT DMA5_INTR_VECT | ||
200 | #define REQ_DMA_SYNCSER dma_sser0 | ||
201 | #else /* Artpec-3 */ | ||
202 | #define OUT_DMA_NBR 6 | ||
203 | #define IN_DMA_NBR 7 | ||
204 | #define PINMUX_SSER pinmux_sser | ||
205 | #define SYNCSER_INST regi_sser | ||
206 | #define SYNCSER_INTR_VECT SSER_INTR_VECT | ||
207 | #define OUT_DMA_INST regi_dma6 | ||
208 | #define IN_DMA_INST regi_dma7 | ||
209 | #define DMA_OUT_INTR_VECT DMA6_INTR_VECT | ||
210 | #define DMA_IN_INTR_VECT DMA7_INTR_VECT | ||
211 | #define REQ_DMA_SYNCSER dma_sser | ||
161 | #endif | 212 | #endif |
162 | 213 | ||
163 | /* The ports */ | 214 | /* The ports */ |
164 | static struct sync_port ports[]= | 215 | static struct sync_port ports[]= |
165 | { | 216 | { |
166 | { | 217 | { |
167 | .regi_sser = regi_sser0, | 218 | .regi_sser = SYNCSER_INST, |
168 | .regi_dmaout = regi_dma4, | 219 | .regi_dmaout = OUT_DMA_INST, |
169 | .regi_dmain = regi_dma5, | 220 | .regi_dmain = IN_DMA_INST, |
170 | #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) | 221 | #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA) |
171 | .use_dma = 1, | 222 | .use_dma = 1, |
172 | #else | 223 | #else |
173 | .use_dma = 0, | 224 | .use_dma = 0, |
174 | #endif | 225 | #endif |
175 | }, | 226 | } |
227 | #ifdef CONFIG_ETRAXFS | ||
228 | , | ||
229 | |||
176 | { | 230 | { |
177 | .regi_sser = regi_sser1, | 231 | .regi_sser = regi_sser1, |
178 | .regi_dmaout = regi_dma6, | 232 | .regi_dmaout = regi_dma6, |
@@ -183,9 +237,10 @@ static struct sync_port ports[]= | |||
183 | .use_dma = 0, | 237 | .use_dma = 0, |
184 | #endif | 238 | #endif |
185 | } | 239 | } |
240 | #endif | ||
186 | }; | 241 | }; |
187 | 242 | ||
188 | #define NUMBER_OF_PORTS ARRAY_SIZE(ports) | 243 | #define NBR_PORTS ARRAY_SIZE(ports) |
189 | 244 | ||
190 | static const struct file_operations sync_serial_fops = { | 245 | static const struct file_operations sync_serial_fops = { |
191 | .owner = THIS_MODULE, | 246 | .owner = THIS_MODULE, |
@@ -200,19 +255,21 @@ static const struct file_operations sync_serial_fops = { | |||
200 | static int __init etrax_sync_serial_init(void) | 255 | static int __init etrax_sync_serial_init(void) |
201 | { | 256 | { |
202 | ports[0].enabled = 0; | 257 | ports[0].enabled = 0; |
258 | #ifdef CONFIG_ETRAXFS | ||
203 | ports[1].enabled = 0; | 259 | ports[1].enabled = 0; |
204 | 260 | #endif | |
205 | if (register_chrdev(SYNC_SERIAL_MAJOR,"sync serial", &sync_serial_fops) <0 ) | 261 | if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial", |
206 | { | 262 | &sync_serial_fops) < 0) { |
207 | printk("unable to get major for synchronous serial port\n"); | 263 | printk(KERN_WARNING |
264 | "Unable to get major for synchronous serial port\n"); | ||
208 | return -EBUSY; | 265 | return -EBUSY; |
209 | } | 266 | } |
210 | 267 | ||
211 | /* Initialize Ports */ | 268 | /* Initialize Ports */ |
212 | #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) | 269 | #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) |
213 | if (crisv32_pinmux_alloc_fixed(pinmux_sser0)) | 270 | if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) { |
214 | { | 271 | printk(KERN_WARNING |
215 | printk("Unable to allocate pins for syncrhronous serial port 0\n"); | 272 | "Unable to alloc pins for synchronous serial port 0\n"); |
216 | return -EIO; | 273 | return -EIO; |
217 | } | 274 | } |
218 | ports[0].enabled = 1; | 275 | ports[0].enabled = 1; |
@@ -220,33 +277,41 @@ static int __init etrax_sync_serial_init(void) | |||
220 | #endif | 277 | #endif |
221 | 278 | ||
222 | #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) | 279 | #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) |
223 | if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) | 280 | if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) { |
224 | { | 281 | printk(KERN_WARNING |
225 | printk("Unable to allocate pins for syncrhronous serial port 0\n"); | 282 | "Unable to alloc pins for synchronous serial port 0\n"); |
226 | return -EIO; | 283 | return -EIO; |
227 | } | 284 | } |
228 | ports[1].enabled = 1; | 285 | ports[1].enabled = 1; |
229 | initialize_port(1); | 286 | initialize_port(1); |
230 | #endif | 287 | #endif |
231 | 288 | ||
232 | printk("ETRAX FS synchronous serial port driver\n"); | 289 | #ifdef CONFIG_ETRAXFS |
290 | printk(KERN_INFO "ETRAX FS synchronous serial port driver\n"); | ||
291 | #else | ||
292 | printk(KERN_INFO "Artpec-3 synchronous serial port driver\n"); | ||
293 | #endif | ||
233 | return 0; | 294 | return 0; |
234 | } | 295 | } |
235 | 296 | ||
297 | #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) | ||
236 | static void __init initialize_port(int portnbr) | 298 | static void __init initialize_port(int portnbr) |
237 | { | 299 | { |
238 | struct sync_port* port = &ports[portnbr]; | 300 | int __attribute__((unused)) i; |
301 | struct sync_port *port = &ports[portnbr]; | ||
239 | reg_sser_rw_cfg cfg = {0}; | 302 | reg_sser_rw_cfg cfg = {0}; |
240 | reg_sser_rw_frm_cfg frm_cfg = {0}; | 303 | reg_sser_rw_frm_cfg frm_cfg = {0}; |
241 | reg_sser_rw_tr_cfg tr_cfg = {0}; | 304 | reg_sser_rw_tr_cfg tr_cfg = {0}; |
242 | reg_sser_rw_rec_cfg rec_cfg = {0}; | 305 | reg_sser_rw_rec_cfg rec_cfg = {0}; |
243 | 306 | ||
244 | DEBUG(printk("Init sync serial port %d\n", portnbr)); | 307 | DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr)); |
245 | 308 | ||
246 | port->port_nbr = portnbr; | 309 | port->port_nbr = portnbr; |
247 | port->init_irqs = 1; | 310 | port->init_irqs = 1; |
248 | 311 | ||
249 | port->outp = port->out_buffer; | 312 | port->out_rd_ptr = port->out_buffer; |
313 | port->out_buf_count = 0; | ||
314 | |||
250 | port->output = 1; | 315 | port->output = 1; |
251 | port->input = 0; | 316 | port->input = 0; |
252 | 317 | ||
@@ -255,7 +320,7 @@ static void __init initialize_port(int portnbr) | |||
255 | port->in_buffer_size = IN_BUFFER_SIZE; | 320 | port->in_buffer_size = IN_BUFFER_SIZE; |
256 | port->inbufchunk = IN_DESCR_SIZE; | 321 | port->inbufchunk = IN_DESCR_SIZE; |
257 | port->next_rx_desc = &port->in_descr[0]; | 322 | port->next_rx_desc = &port->in_descr[0]; |
258 | port->prev_rx_desc = &port->in_descr[NUM_IN_DESCR-1]; | 323 | port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1]; |
259 | port->prev_rx_desc->eol = 1; | 324 | port->prev_rx_desc->eol = 1; |
260 | 325 | ||
261 | init_waitqueue_head(&port->out_wait_q); | 326 | init_waitqueue_head(&port->out_wait_q); |
@@ -286,8 +351,13 @@ static void __init initialize_port(int portnbr) | |||
286 | tr_cfg.sample_size = 7; | 351 | tr_cfg.sample_size = 7; |
287 | tr_cfg.sh_dir = regk_sser_msbfirst; | 352 | tr_cfg.sh_dir = regk_sser_msbfirst; |
288 | tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; | 353 | tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; |
354 | #if 0 | ||
289 | tr_cfg.rate_ctrl = regk_sser_bulk; | 355 | tr_cfg.rate_ctrl = regk_sser_bulk; |
290 | tr_cfg.data_pin_use = regk_sser_dout; | 356 | tr_cfg.data_pin_use = regk_sser_dout; |
357 | #else | ||
358 | tr_cfg.rate_ctrl = regk_sser_iso; | ||
359 | tr_cfg.data_pin_use = regk_sser_dout; | ||
360 | #endif | ||
291 | tr_cfg.bulk_wspace = 1; | 361 | tr_cfg.bulk_wspace = 1; |
292 | REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); | 362 | REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); |
293 | 363 | ||
@@ -296,7 +366,29 @@ static void __init initialize_port(int portnbr) | |||
296 | rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; | 366 | rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no; |
297 | rec_cfg.fifo_thr = regk_sser_inf; | 367 | rec_cfg.fifo_thr = regk_sser_inf; |
298 | REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); | 368 | REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); |
369 | |||
370 | #ifdef SYNC_SER_DMA | ||
371 | /* Setup the descriptor ring for dma out/transmit. */ | ||
372 | for (i = 0; i < NBR_OUT_DESCR; i++) { | ||
373 | port->out_descr[i].wait = 0; | ||
374 | port->out_descr[i].intr = 1; | ||
375 | port->out_descr[i].eol = 0; | ||
376 | port->out_descr[i].out_eop = 0; | ||
377 | port->out_descr[i].next = | ||
378 | (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]); | ||
379 | } | ||
380 | |||
381 | /* Create a ring from the list. */ | ||
382 | port->out_descr[NBR_OUT_DESCR-1].next = | ||
383 | (dma_descr_data *)virt_to_phys(&port->out_descr[0]); | ||
384 | |||
385 | /* Setup context for traversing the ring. */ | ||
386 | port->active_tr_descr = &port->out_descr[0]; | ||
387 | port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1]; | ||
388 | port->catch_tr_descr = &port->out_descr[0]; | ||
389 | #endif | ||
299 | } | 390 | } |
391 | #endif | ||
300 | 392 | ||
301 | static inline int sync_data_avail(struct sync_port *port) | 393 | static inline int sync_data_avail(struct sync_port *port) |
302 | { | 394 | { |
@@ -311,7 +403,7 @@ static inline int sync_data_avail(struct sync_port *port) | |||
311 | * ^rp ^wp ^wp ^rp | 403 | * ^rp ^wp ^wp ^rp |
312 | */ | 404 | */ |
313 | 405 | ||
314 | if (end >= start) | 406 | if (end >= start) |
315 | avail = end - start; | 407 | avail = end - start; |
316 | else | 408 | else |
317 | avail = port->in_buffer_size - (start - end); | 409 | avail = port->in_buffer_size - (start - end); |
@@ -331,7 +423,7 @@ static inline int sync_data_avail_to_end(struct sync_port *port) | |||
331 | * ^rp ^wp ^wp ^rp | 423 | * ^rp ^wp ^wp ^rp |
332 | */ | 424 | */ |
333 | 425 | ||
334 | if (end >= start) | 426 | if (end >= start) |
335 | avail = end - start; | 427 | avail = end - start; |
336 | else | 428 | else |
337 | avail = port->flip + port->in_buffer_size - start; | 429 | avail = port->flip + port->in_buffer_size - start; |
@@ -341,66 +433,69 @@ static inline int sync_data_avail_to_end(struct sync_port *port) | |||
341 | static int sync_serial_open(struct inode *inode, struct file *file) | 433 | static int sync_serial_open(struct inode *inode, struct file *file) |
342 | { | 434 | { |
343 | int dev = iminor(inode); | 435 | int dev = iminor(inode); |
344 | sync_port* port; | 436 | sync_port *port; |
345 | reg_dma_rw_cfg cfg = {.en = regk_dma_yes}; | 437 | reg_dma_rw_cfg cfg = {.en = regk_dma_yes}; |
346 | reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes}; | 438 | reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes}; |
347 | 439 | ||
348 | DEBUG(printk("Open sync serial port %d\n", dev)); | 440 | DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev)); |
349 | 441 | ||
350 | if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) | 442 | if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) |
351 | { | 443 | { |
352 | DEBUG(printk("Invalid minor %d\n", dev)); | 444 | DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev)); |
353 | return -ENODEV; | 445 | return -ENODEV; |
354 | } | 446 | } |
355 | port = &ports[dev]; | 447 | port = &ports[dev]; |
356 | /* Allow open this device twice (assuming one reader and one writer) */ | 448 | /* Allow open this device twice (assuming one reader and one writer) */ |
357 | if (port->busy == 2) | 449 | if (port->busy == 2) |
358 | { | 450 | { |
359 | DEBUG(printk("Device is busy.. \n")); | 451 | DEBUG(printk(KERN_DEBUG "Device is busy.. \n")); |
360 | return -EBUSY; | 452 | return -EBUSY; |
361 | } | 453 | } |
454 | |||
455 | |||
362 | if (port->init_irqs) { | 456 | if (port->init_irqs) { |
363 | if (port->use_dma) { | 457 | if (port->use_dma) { |
364 | if (port == &ports[0]){ | 458 | if (port == &ports[0]) { |
365 | #ifdef SYNC_SER_DMA | 459 | #ifdef SYNC_SER_DMA |
366 | if(request_irq(DMA4_INTR_VECT, | 460 | if (request_irq(DMA_OUT_INTR_VECT, |
367 | tr_interrupt, | 461 | tr_interrupt, |
368 | 0, | 462 | 0, |
369 | "synchronous serial 0 dma tr", | 463 | "synchronous serial 0 dma tr", |
370 | &ports[0])) { | 464 | &ports[0])) { |
371 | printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); | 465 | printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); |
372 | return -EBUSY; | 466 | return -EBUSY; |
373 | } else if(request_irq(DMA5_INTR_VECT, | 467 | } else if (request_irq(DMA_IN_INTR_VECT, |
374 | rx_interrupt, | 468 | rx_interrupt, |
375 | 0, | 469 | 0, |
376 | "synchronous serial 1 dma rx", | 470 | "synchronous serial 1 dma rx", |
377 | &ports[0])) { | 471 | &ports[0])) { |
378 | free_irq(DMA4_INTR_VECT, &port[0]); | 472 | free_irq(DMA_OUT_INTR_VECT, &port[0]); |
379 | printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); | 473 | printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ"); |
380 | return -EBUSY; | 474 | return -EBUSY; |
381 | } else if (crisv32_request_dma(SYNC_SER0_TX_DMA_NBR, | 475 | } else if (crisv32_request_dma(OUT_DMA_NBR, |
382 | "synchronous serial 0 dma tr", | 476 | "synchronous serial 0 dma tr", |
383 | DMA_VERBOSE_ON_ERROR, | 477 | DMA_VERBOSE_ON_ERROR, |
384 | 0, | 478 | 0, |
385 | dma_sser0)) { | 479 | REQ_DMA_SYNCSER)) { |
386 | free_irq(DMA4_INTR_VECT, &port[0]); | 480 | free_irq(DMA_OUT_INTR_VECT, &port[0]); |
387 | free_irq(DMA5_INTR_VECT, &port[0]); | 481 | free_irq(DMA_IN_INTR_VECT, &port[0]); |
388 | printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel"); | 482 | printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel"); |
389 | return -EBUSY; | 483 | return -EBUSY; |
390 | } else if (crisv32_request_dma(SYNC_SER0_RX_DMA_NBR, | 484 | } else if (crisv32_request_dma(IN_DMA_NBR, |
391 | "synchronous serial 0 dma rec", | 485 | "synchronous serial 0 dma rec", |
392 | DMA_VERBOSE_ON_ERROR, | 486 | DMA_VERBOSE_ON_ERROR, |
393 | 0, | 487 | 0, |
394 | dma_sser0)) { | 488 | REQ_DMA_SYNCSER)) { |
395 | crisv32_free_dma(SYNC_SER0_TX_DMA_NBR); | 489 | crisv32_free_dma(OUT_DMA_NBR); |
396 | free_irq(DMA4_INTR_VECT, &port[0]); | 490 | free_irq(DMA_OUT_INTR_VECT, &port[0]); |
397 | free_irq(DMA5_INTR_VECT, &port[0]); | 491 | free_irq(DMA_IN_INTR_VECT, &port[0]); |
398 | printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel"); | 492 | printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel"); |
399 | return -EBUSY; | 493 | return -EBUSY; |
400 | } | 494 | } |
401 | #endif | 495 | #endif |
402 | } | 496 | } |
403 | else if (port == &ports[1]){ | 497 | #ifdef CONFIG_ETRAXFS |
498 | else if (port == &ports[1]) { | ||
404 | #ifdef SYNC_SER_DMA | 499 | #ifdef SYNC_SER_DMA |
405 | if (request_irq(DMA6_INTR_VECT, | 500 | if (request_irq(DMA6_INTR_VECT, |
406 | tr_interrupt, | 501 | tr_interrupt, |
@@ -417,20 +512,22 @@ static int sync_serial_open(struct inode *inode, struct file *file) | |||
417 | free_irq(DMA6_INTR_VECT, &ports[1]); | 512 | free_irq(DMA6_INTR_VECT, &ports[1]); |
418 | printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ"); | 513 | printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ"); |
419 | return -EBUSY; | 514 | return -EBUSY; |
420 | } else if (crisv32_request_dma(SYNC_SER1_TX_DMA_NBR, | 515 | } else if (crisv32_request_dma( |
421 | "synchronous serial 1 dma tr", | 516 | SYNC_SER1_TX_DMA_NBR, |
422 | DMA_VERBOSE_ON_ERROR, | 517 | "synchronous serial 1 dma tr", |
423 | 0, | 518 | DMA_VERBOSE_ON_ERROR, |
424 | dma_sser1)) { | 519 | 0, |
425 | free_irq(21, &ports[1]); | 520 | dma_sser1)) { |
426 | free_irq(20, &ports[1]); | 521 | free_irq(DMA6_INTR_VECT, &ports[1]); |
522 | free_irq(DMA7_INTR_VECT, &ports[1]); | ||
427 | printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel"); | 523 | printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel"); |
428 | return -EBUSY; | 524 | return -EBUSY; |
429 | } else if (crisv32_request_dma(SYNC_SER1_RX_DMA_NBR, | 525 | } else if (crisv32_request_dma( |
430 | "synchronous serial 3 dma rec", | 526 | SYNC_SER1_RX_DMA_NBR, |
431 | DMA_VERBOSE_ON_ERROR, | 527 | "synchronous serial 3 dma rec", |
432 | 0, | 528 | DMA_VERBOSE_ON_ERROR, |
433 | dma_sser1)) { | 529 | 0, |
530 | dma_sser1)) { | ||
434 | crisv32_free_dma(SYNC_SER1_TX_DMA_NBR); | 531 | crisv32_free_dma(SYNC_SER1_TX_DMA_NBR); |
435 | free_irq(DMA6_INTR_VECT, &ports[1]); | 532 | free_irq(DMA6_INTR_VECT, &ports[1]); |
436 | free_irq(DMA7_INTR_VECT, &ports[1]); | 533 | free_irq(DMA7_INTR_VECT, &ports[1]); |
@@ -439,14 +536,14 @@ static int sync_serial_open(struct inode *inode, struct file *file) | |||
439 | } | 536 | } |
440 | #endif | 537 | #endif |
441 | } | 538 | } |
442 | 539 | #endif | |
443 | /* Enable DMAs */ | 540 | /* Enable DMAs */ |
444 | REG_WR(dma, port->regi_dmain, rw_cfg, cfg); | 541 | REG_WR(dma, port->regi_dmain, rw_cfg, cfg); |
445 | REG_WR(dma, port->regi_dmaout, rw_cfg, cfg); | 542 | REG_WR(dma, port->regi_dmaout, rw_cfg, cfg); |
446 | /* Enable DMA IRQs */ | 543 | /* Enable DMA IRQs */ |
447 | REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask); | 544 | REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask); |
448 | REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask); | 545 | REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask); |
449 | /* Set up wordsize = 2 for DMAs. */ | 546 | /* Set up wordsize = 1 for DMAs. */ |
450 | DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1); | 547 | DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1); |
451 | DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1); | 548 | DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1); |
452 | 549 | ||
@@ -455,7 +552,7 @@ static int sync_serial_open(struct inode *inode, struct file *file) | |||
455 | } else { /* !port->use_dma */ | 552 | } else { /* !port->use_dma */ |
456 | #ifdef SYNC_SER_MANUAL | 553 | #ifdef SYNC_SER_MANUAL |
457 | if (port == &ports[0]) { | 554 | if (port == &ports[0]) { |
458 | if (request_irq(SSER0_INTR_VECT, | 555 | if (request_irq(SYNCSER_INTR_VECT, |
459 | manual_interrupt, | 556 | manual_interrupt, |
460 | 0, | 557 | 0, |
461 | "synchronous serial manual irq", | 558 | "synchronous serial manual irq", |
@@ -463,7 +560,9 @@ static int sync_serial_open(struct inode *inode, struct file *file) | |||
463 | printk("Can't allocate sync serial manual irq"); | 560 | printk("Can't allocate sync serial manual irq"); |
464 | return -EBUSY; | 561 | return -EBUSY; |
465 | } | 562 | } |
466 | } else if (port == &ports[1]) { | 563 | } |
564 | #ifdef CONFIG_ETRAXFS | ||
565 | else if (port == &ports[1]) { | ||
467 | if (request_irq(SSER1_INTR_VECT, | 566 | if (request_irq(SSER1_INTR_VECT, |
468 | manual_interrupt, | 567 | manual_interrupt, |
469 | 0, | 568 | 0, |
@@ -473,11 +572,13 @@ static int sync_serial_open(struct inode *inode, struct file *file) | |||
473 | return -EBUSY; | 572 | return -EBUSY; |
474 | } | 573 | } |
475 | } | 574 | } |
575 | #endif | ||
476 | port->init_irqs = 0; | 576 | port->init_irqs = 0; |
477 | #else | 577 | #else |
478 | panic("sync_serial: Manual mode not supported.\n"); | 578 | panic("sync_serial: Manual mode not supported.\n"); |
479 | #endif /* SYNC_SER_MANUAL */ | 579 | #endif /* SYNC_SER_MANUAL */ |
480 | } | 580 | } |
581 | |||
481 | } /* port->init_irqs */ | 582 | } /* port->init_irqs */ |
482 | 583 | ||
483 | port->busy++; | 584 | port->busy++; |
@@ -487,9 +588,9 @@ static int sync_serial_open(struct inode *inode, struct file *file) | |||
487 | static int sync_serial_release(struct inode *inode, struct file *file) | 588 | static int sync_serial_release(struct inode *inode, struct file *file) |
488 | { | 589 | { |
489 | int dev = iminor(inode); | 590 | int dev = iminor(inode); |
490 | sync_port* port; | 591 | sync_port *port; |
491 | 592 | ||
492 | if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) | 593 | if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) |
493 | { | 594 | { |
494 | DEBUG(printk("Invalid minor %d\n", dev)); | 595 | DEBUG(printk("Invalid minor %d\n", dev)); |
495 | return -ENODEV; | 596 | return -ENODEV; |
@@ -506,17 +607,37 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait) | |||
506 | { | 607 | { |
507 | int dev = iminor(file->f_path.dentry->d_inode); | 608 | int dev = iminor(file->f_path.dentry->d_inode); |
508 | unsigned int mask = 0; | 609 | unsigned int mask = 0; |
509 | sync_port* port; | 610 | sync_port *port; |
510 | DEBUGPOLL( static unsigned int prev_mask = 0; ); | 611 | DEBUGPOLL( static unsigned int prev_mask = 0; ); |
511 | 612 | ||
512 | port = &ports[dev]; | 613 | port = &ports[dev]; |
614 | |||
615 | if (!port->started) { | ||
616 | reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); | ||
617 | reg_sser_rw_rec_cfg rec_cfg = | ||
618 | REG_RD(sser, port->regi_sser, rw_rec_cfg); | ||
619 | cfg.en = regk_sser_yes; | ||
620 | rec_cfg.rec_en = port->input; | ||
621 | REG_WR(sser, port->regi_sser, rw_cfg, cfg); | ||
622 | REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); | ||
623 | port->started = 1; | ||
624 | } | ||
625 | |||
513 | poll_wait(file, &port->out_wait_q, wait); | 626 | poll_wait(file, &port->out_wait_q, wait); |
514 | poll_wait(file, &port->in_wait_q, wait); | 627 | poll_wait(file, &port->in_wait_q, wait); |
515 | /* Some room to write */ | 628 | |
516 | if (port->out_count < OUT_BUFFER_SIZE) | 629 | /* No active transfer, descriptors are available */ |
630 | if (port->output && !port->tr_running) | ||
631 | mask |= POLLOUT | POLLWRNORM; | ||
632 | |||
633 | /* Descriptor and buffer space available. */ | ||
634 | if (port->output && | ||
635 | port->active_tr_descr != port->catch_tr_descr && | ||
636 | port->out_buf_count < OUT_BUFFER_SIZE) | ||
517 | mask |= POLLOUT | POLLWRNORM; | 637 | mask |= POLLOUT | POLLWRNORM; |
638 | |||
518 | /* At least an inbufchunk of data */ | 639 | /* At least an inbufchunk of data */ |
519 | if (sync_data_avail(port) >= port->inbufchunk) | 640 | if (port->input && sync_data_avail(port) >= port->inbufchunk) |
520 | mask |= POLLIN | POLLRDNORM; | 641 | mask |= POLLIN | POLLRDNORM; |
521 | 642 | ||
522 | DEBUGPOLL(if (mask != prev_mask) | 643 | DEBUGPOLL(if (mask != prev_mask) |
@@ -531,15 +652,16 @@ static int sync_serial_ioctl(struct inode *inode, struct file *file, | |||
531 | unsigned int cmd, unsigned long arg) | 652 | unsigned int cmd, unsigned long arg) |
532 | { | 653 | { |
533 | int return_val = 0; | 654 | int return_val = 0; |
655 | int dma_w_size = regk_dma_set_w_size1; | ||
534 | int dev = iminor(file->f_path.dentry->d_inode); | 656 | int dev = iminor(file->f_path.dentry->d_inode); |
535 | sync_port* port; | 657 | sync_port *port; |
536 | reg_sser_rw_tr_cfg tr_cfg; | 658 | reg_sser_rw_tr_cfg tr_cfg; |
537 | reg_sser_rw_rec_cfg rec_cfg; | 659 | reg_sser_rw_rec_cfg rec_cfg; |
538 | reg_sser_rw_frm_cfg frm_cfg; | 660 | reg_sser_rw_frm_cfg frm_cfg; |
539 | reg_sser_rw_cfg gen_cfg; | 661 | reg_sser_rw_cfg gen_cfg; |
540 | reg_sser_rw_intr_mask intr_mask; | 662 | reg_sser_rw_intr_mask intr_mask; |
541 | 663 | ||
542 | if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) | 664 | if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) |
543 | { | 665 | { |
544 | DEBUG(printk("Invalid minor %d\n", dev)); | 666 | DEBUG(printk("Invalid minor %d\n", dev)); |
545 | return -1; | 667 | return -1; |
@@ -558,61 +680,81 @@ static int sync_serial_ioctl(struct inode *inode, struct file *file, | |||
558 | case SSP_SPEED: | 680 | case SSP_SPEED: |
559 | if (GET_SPEED(arg) == CODEC) | 681 | if (GET_SPEED(arg) == CODEC) |
560 | { | 682 | { |
683 | unsigned int freq; | ||
684 | |||
561 | gen_cfg.base_freq = regk_sser_f32; | 685 | gen_cfg.base_freq = regk_sser_f32; |
562 | /* FREQ = 0 => 4 MHz => clk_div = 7*/ | 686 | |
563 | gen_cfg.clk_div = 6 + (1 << GET_FREQ(arg)); | 687 | /* Clock divider will internally be |
564 | } | 688 | * gen_cfg.clk_div + 1. |
565 | else | 689 | */ |
566 | { | 690 | |
691 | freq = GET_FREQ(arg); | ||
692 | switch (freq) { | ||
693 | case FREQ_32kHz: | ||
694 | case FREQ_64kHz: | ||
695 | case FREQ_128kHz: | ||
696 | case FREQ_256kHz: | ||
697 | gen_cfg.clk_div = 125 * | ||
698 | (1 << (freq - FREQ_256kHz)) - 1; | ||
699 | break; | ||
700 | case FREQ_512kHz: | ||
701 | gen_cfg.clk_div = 62; | ||
702 | break; | ||
703 | case FREQ_1MHz: | ||
704 | case FREQ_2MHz: | ||
705 | case FREQ_4MHz: | ||
706 | gen_cfg.clk_div = 8 * (1 << freq) - 1; | ||
707 | break; | ||
708 | } | ||
709 | } else { | ||
567 | gen_cfg.base_freq = regk_sser_f29_493; | 710 | gen_cfg.base_freq = regk_sser_f29_493; |
568 | switch (GET_SPEED(arg)) | 711 | switch (GET_SPEED(arg)) { |
569 | { | 712 | case SSP150: |
570 | case SSP150: | 713 | gen_cfg.clk_div = 29493000 / (150 * 8) - 1; |
571 | gen_cfg.clk_div = 29493000 / (150 * 8) - 1; | 714 | break; |
572 | break; | 715 | case SSP300: |
573 | case SSP300: | 716 | gen_cfg.clk_div = 29493000 / (300 * 8) - 1; |
574 | gen_cfg.clk_div = 29493000 / (300 * 8) - 1; | 717 | break; |
575 | break; | 718 | case SSP600: |
576 | case SSP600: | 719 | gen_cfg.clk_div = 29493000 / (600 * 8) - 1; |
577 | gen_cfg.clk_div = 29493000 / (600 * 8) - 1; | 720 | break; |
578 | break; | 721 | case SSP1200: |
579 | case SSP1200: | 722 | gen_cfg.clk_div = 29493000 / (1200 * 8) - 1; |
580 | gen_cfg.clk_div = 29493000 / (1200 * 8) - 1; | 723 | break; |
581 | break; | 724 | case SSP2400: |
582 | case SSP2400: | 725 | gen_cfg.clk_div = 29493000 / (2400 * 8) - 1; |
583 | gen_cfg.clk_div = 29493000 / (2400 * 8) - 1; | 726 | break; |
584 | break; | 727 | case SSP4800: |
585 | case SSP4800: | 728 | gen_cfg.clk_div = 29493000 / (4800 * 8) - 1; |
586 | gen_cfg.clk_div = 29493000 / (4800 * 8) - 1; | 729 | break; |
587 | break; | 730 | case SSP9600: |
588 | case SSP9600: | 731 | gen_cfg.clk_div = 29493000 / (9600 * 8) - 1; |
589 | gen_cfg.clk_div = 29493000 / (9600 * 8) - 1; | 732 | break; |
590 | break; | 733 | case SSP19200: |
591 | case SSP19200: | 734 | gen_cfg.clk_div = 29493000 / (19200 * 8) - 1; |
592 | gen_cfg.clk_div = 29493000 / (19200 * 8) - 1; | 735 | break; |
593 | break; | 736 | case SSP28800: |
594 | case SSP28800: | 737 | gen_cfg.clk_div = 29493000 / (28800 * 8) - 1; |
595 | gen_cfg.clk_div = 29493000 / (28800 * 8) - 1; | 738 | break; |
596 | break; | 739 | case SSP57600: |
597 | case SSP57600: | 740 | gen_cfg.clk_div = 29493000 / (57600 * 8) - 1; |
598 | gen_cfg.clk_div = 29493000 / (57600 * 8) - 1; | 741 | break; |
599 | break; | 742 | case SSP115200: |
600 | case SSP115200: | 743 | gen_cfg.clk_div = 29493000 / (115200 * 8) - 1; |
601 | gen_cfg.clk_div = 29493000 / (115200 * 8) - 1; | 744 | break; |
602 | break; | 745 | case SSP230400: |
603 | case SSP230400: | 746 | gen_cfg.clk_div = 29493000 / (230400 * 8) - 1; |
604 | gen_cfg.clk_div = 29493000 / (230400 * 8) - 1; | 747 | break; |
605 | break; | 748 | case SSP460800: |
606 | case SSP460800: | 749 | gen_cfg.clk_div = 29493000 / (460800 * 8) - 1; |
607 | gen_cfg.clk_div = 29493000 / (460800 * 8) - 1; | 750 | break; |
608 | break; | 751 | case SSP921600: |
609 | case SSP921600: | 752 | gen_cfg.clk_div = 29493000 / (921600 * 8) - 1; |
610 | gen_cfg.clk_div = 29493000 / (921600 * 8) - 1; | 753 | break; |
611 | break; | 754 | case SSP3125000: |
612 | case SSP3125000: | 755 | gen_cfg.base_freq = regk_sser_f100; |
613 | gen_cfg.base_freq = regk_sser_f100; | 756 | gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1; |
614 | gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1; | 757 | break; |
615 | break; | ||
616 | 758 | ||
617 | } | 759 | } |
618 | } | 760 | } |
@@ -625,46 +767,60 @@ static int sync_serial_ioctl(struct inode *inode, struct file *file, | |||
625 | case MASTER_OUTPUT: | 767 | case MASTER_OUTPUT: |
626 | port->output = 1; | 768 | port->output = 1; |
627 | port->input = 0; | 769 | port->input = 0; |
770 | frm_cfg.out_on = regk_sser_tr; | ||
771 | frm_cfg.frame_pin_dir = regk_sser_out; | ||
628 | gen_cfg.clk_dir = regk_sser_out; | 772 | gen_cfg.clk_dir = regk_sser_out; |
629 | break; | 773 | break; |
630 | case SLAVE_OUTPUT: | 774 | case SLAVE_OUTPUT: |
631 | port->output = 1; | 775 | port->output = 1; |
632 | port->input = 0; | 776 | port->input = 0; |
777 | frm_cfg.frame_pin_dir = regk_sser_in; | ||
633 | gen_cfg.clk_dir = regk_sser_in; | 778 | gen_cfg.clk_dir = regk_sser_in; |
634 | break; | 779 | break; |
635 | case MASTER_INPUT: | 780 | case MASTER_INPUT: |
636 | port->output = 0; | 781 | port->output = 0; |
637 | port->input = 1; | 782 | port->input = 1; |
783 | frm_cfg.frame_pin_dir = regk_sser_out; | ||
784 | frm_cfg.out_on = regk_sser_intern_tb; | ||
638 | gen_cfg.clk_dir = regk_sser_out; | 785 | gen_cfg.clk_dir = regk_sser_out; |
639 | break; | 786 | break; |
640 | case SLAVE_INPUT: | 787 | case SLAVE_INPUT: |
641 | port->output = 0; | 788 | port->output = 0; |
642 | port->input = 1; | 789 | port->input = 1; |
790 | frm_cfg.frame_pin_dir = regk_sser_in; | ||
643 | gen_cfg.clk_dir = regk_sser_in; | 791 | gen_cfg.clk_dir = regk_sser_in; |
644 | break; | 792 | break; |
645 | case MASTER_BIDIR: | 793 | case MASTER_BIDIR: |
646 | port->output = 1; | 794 | port->output = 1; |
647 | port->input = 1; | 795 | port->input = 1; |
796 | frm_cfg.frame_pin_dir = regk_sser_out; | ||
797 | frm_cfg.out_on = regk_sser_intern_tb; | ||
648 | gen_cfg.clk_dir = regk_sser_out; | 798 | gen_cfg.clk_dir = regk_sser_out; |
649 | break; | 799 | break; |
650 | case SLAVE_BIDIR: | 800 | case SLAVE_BIDIR: |
651 | port->output = 1; | 801 | port->output = 1; |
652 | port->input = 1; | 802 | port->input = 1; |
803 | frm_cfg.frame_pin_dir = regk_sser_in; | ||
653 | gen_cfg.clk_dir = regk_sser_in; | 804 | gen_cfg.clk_dir = regk_sser_in; |
654 | break; | 805 | break; |
655 | default: | 806 | default: |
656 | spin_unlock_irq(&port->lock); | 807 | spin_unlock_irq(&port->lock); |
657 | return -EINVAL; | 808 | return -EINVAL; |
658 | |||
659 | } | 809 | } |
660 | if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT)) | 810 | if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT)) |
661 | intr_mask.rdav = regk_sser_yes; | 811 | intr_mask.rdav = regk_sser_yes; |
662 | break; | 812 | break; |
663 | case SSP_FRAME_SYNC: | 813 | case SSP_FRAME_SYNC: |
664 | if (arg & NORMAL_SYNC) | 814 | if (arg & NORMAL_SYNC) { |
815 | frm_cfg.rec_delay = 1; | ||
665 | frm_cfg.tr_delay = 1; | 816 | frm_cfg.tr_delay = 1; |
817 | } | ||
666 | else if (arg & EARLY_SYNC) | 818 | else if (arg & EARLY_SYNC) |
667 | frm_cfg.tr_delay = 0; | 819 | frm_cfg.rec_delay = frm_cfg.tr_delay = 0; |
820 | else if (arg & SECOND_WORD_SYNC) { | ||
821 | frm_cfg.rec_delay = 7; | ||
822 | frm_cfg.tr_delay = 1; | ||
823 | } | ||
668 | 824 | ||
669 | tr_cfg.bulk_wspace = frm_cfg.tr_delay; | 825 | tr_cfg.bulk_wspace = frm_cfg.tr_delay; |
670 | frm_cfg.early_wend = regk_sser_yes; | 826 | frm_cfg.early_wend = regk_sser_yes; |
@@ -680,9 +836,11 @@ static int sync_serial_ioctl(struct inode *inode, struct file *file, | |||
680 | else if (arg & SYNC_OFF) | 836 | else if (arg & SYNC_OFF) |
681 | frm_cfg.frame_pin_use = regk_sser_gio0; | 837 | frm_cfg.frame_pin_use = regk_sser_gio0; |
682 | 838 | ||
683 | if (arg & WORD_SIZE_8) | 839 | dma_w_size = regk_dma_set_w_size2; |
840 | if (arg & WORD_SIZE_8) { | ||
684 | rec_cfg.sample_size = tr_cfg.sample_size = 7; | 841 | rec_cfg.sample_size = tr_cfg.sample_size = 7; |
685 | else if (arg & WORD_SIZE_12) | 842 | dma_w_size = regk_dma_set_w_size1; |
843 | } else if (arg & WORD_SIZE_12) | ||
686 | rec_cfg.sample_size = tr_cfg.sample_size = 11; | 844 | rec_cfg.sample_size = tr_cfg.sample_size = 11; |
687 | else if (arg & WORD_SIZE_16) | 845 | else if (arg & WORD_SIZE_16) |
688 | rec_cfg.sample_size = tr_cfg.sample_size = 15; | 846 | rec_cfg.sample_size = tr_cfg.sample_size = 15; |
@@ -696,10 +854,13 @@ static int sync_serial_ioctl(struct inode *inode, struct file *file, | |||
696 | else if (arg & BIT_ORDER_LSB) | 854 | else if (arg & BIT_ORDER_LSB) |
697 | rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst; | 855 | rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst; |
698 | 856 | ||
699 | if (arg & FLOW_CONTROL_ENABLE) | 857 | if (arg & FLOW_CONTROL_ENABLE) { |
858 | frm_cfg.status_pin_use = regk_sser_frm; | ||
700 | rec_cfg.fifo_thr = regk_sser_thr16; | 859 | rec_cfg.fifo_thr = regk_sser_thr16; |
701 | else if (arg & FLOW_CONTROL_DISABLE) | 860 | } else if (arg & FLOW_CONTROL_DISABLE) { |
861 | frm_cfg.status_pin_use = regk_sser_gio0; | ||
702 | rec_cfg.fifo_thr = regk_sser_inf; | 862 | rec_cfg.fifo_thr = regk_sser_inf; |
863 | } | ||
703 | 864 | ||
704 | if (arg & CLOCK_NOT_GATED) | 865 | if (arg & CLOCK_NOT_GATED) |
705 | gen_cfg.gate_clk = regk_sser_no; | 866 | gen_cfg.gate_clk = regk_sser_no; |
@@ -726,9 +887,9 @@ static int sync_serial_ioctl(struct inode *inode, struct file *file, | |||
726 | break; | 887 | break; |
727 | case SSP_OPOLARITY: | 888 | case SSP_OPOLARITY: |
728 | if (arg & CLOCK_NORMAL) | 889 | if (arg & CLOCK_NORMAL) |
729 | gen_cfg.out_clk_pol = regk_sser_neg; | ||
730 | else if (arg & CLOCK_INVERT) | ||
731 | gen_cfg.out_clk_pol = regk_sser_pos; | 890 | gen_cfg.out_clk_pol = regk_sser_pos; |
891 | else if (arg & CLOCK_INVERT) | ||
892 | gen_cfg.out_clk_pol = regk_sser_neg; | ||
732 | 893 | ||
733 | if (arg & FRAME_NORMAL) | 894 | if (arg & FRAME_NORMAL) |
734 | frm_cfg.level = regk_sser_pos_hi; | 895 | frm_cfg.level = regk_sser_pos_hi; |
@@ -770,10 +931,9 @@ static int sync_serial_ioctl(struct inode *inode, struct file *file, | |||
770 | } | 931 | } |
771 | 932 | ||
772 | 933 | ||
773 | if (port->started) | 934 | if (port->started) { |
774 | { | ||
775 | tr_cfg.tr_en = port->output; | ||
776 | rec_cfg.rec_en = port->input; | 935 | rec_cfg.rec_en = port->input; |
936 | gen_cfg.en = (port->output | port->input); | ||
777 | } | 937 | } |
778 | 938 | ||
779 | REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); | 939 | REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); |
@@ -782,138 +942,145 @@ static int sync_serial_ioctl(struct inode *inode, struct file *file, | |||
782 | REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); | 942 | REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); |
783 | REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); | 943 | REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); |
784 | 944 | ||
945 | |||
946 | if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 | | ||
947 | WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) { | ||
948 | int en = gen_cfg.en; | ||
949 | gen_cfg.en = 0; | ||
950 | REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); | ||
951 | /* ##### Should DMA be stoped before we change dma size? */ | ||
952 | DMA_WR_CMD(port->regi_dmain, dma_w_size); | ||
953 | DMA_WR_CMD(port->regi_dmaout, dma_w_size); | ||
954 | gen_cfg.en = en; | ||
955 | REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg); | ||
956 | } | ||
957 | |||
785 | spin_unlock_irq(&port->lock); | 958 | spin_unlock_irq(&port->lock); |
786 | return return_val; | 959 | return return_val; |
787 | } | 960 | } |
788 | 961 | ||
789 | static ssize_t sync_serial_write(struct file * file, const char * buf, | 962 | /* NOTE: sync_serial_write does not support concurrency */ |
790 | size_t count, loff_t *ppos) | 963 | static ssize_t sync_serial_write(struct file *file, const char *buf, |
964 | size_t count, loff_t *ppos) | ||
791 | { | 965 | { |
792 | int dev = iminor(file->f_path.dentry->d_inode); | 966 | int dev = iminor(file->f_path.dentry->d_inode); |
793 | DECLARE_WAITQUEUE(wait, current); | 967 | DECLARE_WAITQUEUE(wait, current); |
794 | sync_port *port; | 968 | struct sync_port *port; |
795 | unsigned long c, c1; | 969 | int trunc_count; |
796 | unsigned long free_outp; | ||
797 | unsigned long outp; | ||
798 | unsigned long out_buffer; | ||
799 | unsigned long flags; | 970 | unsigned long flags; |
971 | int bytes_free; | ||
972 | int out_buf_count; | ||
800 | 973 | ||
801 | if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) | 974 | unsigned char *rd_ptr; /* First allocated byte in the buffer */ |
802 | { | 975 | unsigned char *wr_ptr; /* First free byte in the buffer */ |
976 | unsigned char *buf_stop_ptr; /* Last byte + 1 */ | ||
977 | |||
978 | if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) { | ||
803 | DEBUG(printk("Invalid minor %d\n", dev)); | 979 | DEBUG(printk("Invalid minor %d\n", dev)); |
804 | return -ENODEV; | 980 | return -ENODEV; |
805 | } | 981 | } |
806 | port = &ports[dev]; | 982 | port = &ports[dev]; |
807 | 983 | ||
808 | DEBUGWRITE(printk("W d%d c %lu (%d/%d)\n", port->port_nbr, count, port->out_count, OUT_BUFFER_SIZE)); | 984 | /* |<- OUT_BUFFER_SIZE ->| |
809 | /* Space to end of buffer */ | 985 | * |<- out_buf_count ->| |
810 | /* | 986 | * |<- trunc_count ->| ...->| |
811 | * out_buffer <c1>012345<- c ->OUT_BUFFER_SIZE | 987 | * ______________________________________________________ |
812 | * outp^ +out_count | 988 | * | free | data | free | |
813 | ^free_outp | 989 | * |_________|___________________|________________________| |
814 | * out_buffer 45<- c ->0123OUT_BUFFER_SIZE | 990 | * ^ rd_ptr ^ wr_ptr |
815 | * +out_count outp^ | ||
816 | * free_outp | ||
817 | * | ||
818 | */ | 991 | */ |
992 | DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n", | ||
993 | port->port_nbr, count, port->active_tr_descr, | ||
994 | port->catch_tr_descr)); | ||
819 | 995 | ||
820 | /* Read variables that may be updated by interrupts */ | 996 | /* Read variables that may be updated by interrupts */ |
821 | spin_lock_irqsave(&port->lock, flags); | 997 | spin_lock_irqsave(&port->lock, flags); |
822 | count = count > OUT_BUFFER_SIZE - port->out_count ? OUT_BUFFER_SIZE - port->out_count : count; | 998 | rd_ptr = port->out_rd_ptr; |
823 | outp = (unsigned long)port->outp; | 999 | out_buf_count = port->out_buf_count; |
824 | free_outp = outp + port->out_count; | ||
825 | spin_unlock_irqrestore(&port->lock, flags); | 1000 | spin_unlock_irqrestore(&port->lock, flags); |
826 | out_buffer = (unsigned long)port->out_buffer; | ||
827 | 1001 | ||
828 | /* Find out where and how much to write */ | 1002 | /* Check if resources are available */ |
829 | if (free_outp >= out_buffer + OUT_BUFFER_SIZE) | 1003 | if (port->tr_running && |
830 | free_outp -= OUT_BUFFER_SIZE; | 1004 | ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) || |
831 | if (free_outp >= outp) | 1005 | out_buf_count >= OUT_BUFFER_SIZE)) { |
832 | c = out_buffer + OUT_BUFFER_SIZE - free_outp; | 1006 | DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev)); |
833 | else | 1007 | return -EAGAIN; |
834 | c = outp - free_outp; | 1008 | } |
835 | if (c > count) | 1009 | |
836 | c = count; | 1010 | buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE; |
1011 | |||
1012 | /* Determine pointer to the first free byte, before copying. */ | ||
1013 | wr_ptr = rd_ptr + out_buf_count; | ||
1014 | if (wr_ptr >= buf_stop_ptr) | ||
1015 | wr_ptr -= OUT_BUFFER_SIZE; | ||
1016 | |||
1017 | /* If we wrap the ring buffer, let the user space program handle it by | ||
1018 | * truncating the data. This could be more elegant, small buffer | ||
1019 | * fragments may occur. | ||
1020 | */ | ||
1021 | bytes_free = OUT_BUFFER_SIZE - out_buf_count; | ||
1022 | if (wr_ptr + bytes_free > buf_stop_ptr) | ||
1023 | bytes_free = buf_stop_ptr - wr_ptr; | ||
1024 | trunc_count = (count < bytes_free) ? count : bytes_free; | ||
837 | 1025 | ||
838 | // DEBUGWRITE(printk("w op %08lX fop %08lX c %lu\n", outp, free_outp, c)); | 1026 | if (copy_from_user(wr_ptr, buf, trunc_count)) |
839 | if (copy_from_user((void*)free_outp, buf, c)) | ||
840 | return -EFAULT; | 1027 | return -EFAULT; |
841 | 1028 | ||
842 | if (c != count) { | 1029 | DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n", |
843 | buf += c; | 1030 | out_buf_count, trunc_count, |
844 | c1 = count - c; | 1031 | port->out_buf_count, port->out_buffer, |
845 | DEBUGWRITE(printk("w2 fi %lu c %lu c1 %lu\n", free_outp-out_buffer, c, c1)); | 1032 | wr_ptr, buf_stop_ptr)); |
846 | if (copy_from_user((void*)out_buffer, buf, c1)) | ||
847 | return -EFAULT; | ||
848 | } | ||
849 | spin_lock_irqsave(&port->lock, flags); | ||
850 | port->out_count += count; | ||
851 | spin_unlock_irqrestore(&port->lock, flags); | ||
852 | 1033 | ||
853 | /* Make sure transmitter/receiver is running */ | 1034 | /* Make sure transmitter/receiver is running */ |
854 | if (!port->started) | 1035 | if (!port->started) { |
855 | { | ||
856 | reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); | 1036 | reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg); |
857 | reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg); | ||
858 | reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); | 1037 | reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg); |
859 | cfg.en = regk_sser_yes; | 1038 | cfg.en = regk_sser_yes; |
860 | tr_cfg.tr_en = port->output; | ||
861 | rec_cfg.rec_en = port->input; | 1039 | rec_cfg.rec_en = port->input; |
862 | REG_WR(sser, port->regi_sser, rw_cfg, cfg); | 1040 | REG_WR(sser, port->regi_sser, rw_cfg, cfg); |
863 | REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); | ||
864 | REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); | 1041 | REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg); |
865 | port->started = 1; | 1042 | port->started = 1; |
866 | } | 1043 | } |
867 | 1044 | ||
868 | if (file->f_flags & O_NONBLOCK) { | 1045 | /* Setup wait if blocking */ |
869 | spin_lock_irqsave(&port->lock, flags); | 1046 | if (!(file->f_flags & O_NONBLOCK)) { |
870 | if (!port->tr_running) { | 1047 | add_wait_queue(&port->out_wait_q, &wait); |
871 | if (!port->use_dma) { | 1048 | set_current_state(TASK_INTERRUPTIBLE); |
872 | reg_sser_rw_intr_mask intr_mask; | ||
873 | intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); | ||
874 | /* Start sender by writing data */ | ||
875 | send_word(port); | ||
876 | /* and enable transmitter ready IRQ */ | ||
877 | intr_mask.trdy = 1; | ||
878 | REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); | ||
879 | } else { | ||
880 | start_dma(port, (unsigned char* volatile )port->outp, c); | ||
881 | } | ||
882 | } | ||
883 | spin_unlock_irqrestore(&port->lock, flags); | ||
884 | DEBUGWRITE(printk("w d%d c %lu NB\n", | ||
885 | port->port_nbr, count)); | ||
886 | return count; | ||
887 | } | 1049 | } |
888 | 1050 | ||
889 | /* Sleep until all sent */ | ||
890 | |||
891 | add_wait_queue(&port->out_wait_q, &wait); | ||
892 | set_current_state(TASK_INTERRUPTIBLE); | ||
893 | spin_lock_irqsave(&port->lock, flags); | 1051 | spin_lock_irqsave(&port->lock, flags); |
894 | if (!port->tr_running) { | 1052 | port->out_buf_count += trunc_count; |
895 | if (!port->use_dma) { | 1053 | if (port->use_dma) { |
896 | reg_sser_rw_intr_mask intr_mask; | 1054 | start_dma_out(port, wr_ptr, trunc_count); |
897 | intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); | 1055 | } else if (!port->tr_running) { |
898 | /* Start sender by writing data */ | 1056 | reg_sser_rw_intr_mask intr_mask; |
899 | send_word(port); | 1057 | intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask); |
900 | /* and enable transmitter ready IRQ */ | 1058 | /* Start sender by writing data */ |
901 | intr_mask.trdy = 1; | 1059 | send_word(port); |
902 | REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); | 1060 | /* and enable transmitter ready IRQ */ |
903 | } else { | 1061 | intr_mask.trdy = 1; |
904 | start_dma(port, port->outp, c); | 1062 | REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask); |
905 | } | ||
906 | } | 1063 | } |
907 | spin_unlock_irqrestore(&port->lock, flags); | 1064 | spin_unlock_irqrestore(&port->lock, flags); |
1065 | |||
1066 | /* Exit if non blocking */ | ||
1067 | if (file->f_flags & O_NONBLOCK) { | ||
1068 | DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n", | ||
1069 | port->port_nbr, trunc_count, | ||
1070 | REG_RD_INT(dma, port->regi_dmaout, r_intr))); | ||
1071 | return trunc_count; | ||
1072 | } | ||
1073 | |||
908 | schedule(); | 1074 | schedule(); |
909 | set_current_state(TASK_RUNNING); | 1075 | set_current_state(TASK_RUNNING); |
910 | remove_wait_queue(&port->out_wait_q, &wait); | 1076 | remove_wait_queue(&port->out_wait_q, &wait); |
1077 | |||
911 | if (signal_pending(current)) | 1078 | if (signal_pending(current)) |
912 | { | ||
913 | return -EINTR; | 1079 | return -EINTR; |
914 | } | 1080 | } |
915 | DEBUGWRITE(printk("w d%d c %lu\n", port->port_nbr, count)); | 1081 | DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n", |
916 | return count; | 1082 | port->port_nbr, trunc_count)); |
1083 | return trunc_count; | ||
917 | } | 1084 | } |
918 | 1085 | ||
919 | static ssize_t sync_serial_read(struct file * file, char * buf, | 1086 | static ssize_t sync_serial_read(struct file * file, char * buf, |
@@ -926,7 +1093,7 @@ static ssize_t sync_serial_read(struct file * file, char * buf, | |||
926 | unsigned char* end; | 1093 | unsigned char* end; |
927 | unsigned long flags; | 1094 | unsigned long flags; |
928 | 1095 | ||
929 | if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled) | 1096 | if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) |
930 | { | 1097 | { |
931 | DEBUG(printk("Invalid minor %d\n", dev)); | 1098 | DEBUG(printk("Invalid minor %d\n", dev)); |
932 | return -ENODEV; | 1099 | return -ENODEV; |
@@ -949,7 +1116,6 @@ static ssize_t sync_serial_read(struct file * file, char * buf, | |||
949 | port->started = 1; | 1116 | port->started = 1; |
950 | } | 1117 | } |
951 | 1118 | ||
952 | |||
953 | /* Calculate number of available bytes */ | 1119 | /* Calculate number of available bytes */ |
954 | /* Save pointers to avoid that they are modified by interrupt */ | 1120 | /* Save pointers to avoid that they are modified by interrupt */ |
955 | spin_lock_irqsave(&port->lock, flags); | 1121 | spin_lock_irqsave(&port->lock, flags); |
@@ -958,16 +1124,14 @@ static ssize_t sync_serial_read(struct file * file, char * buf, | |||
958 | spin_unlock_irqrestore(&port->lock, flags); | 1124 | spin_unlock_irqrestore(&port->lock, flags); |
959 | while ((start == end) && !port->full) /* No data */ | 1125 | while ((start == end) && !port->full) /* No data */ |
960 | { | 1126 | { |
1127 | DEBUGREAD(printk(KERN_DEBUG "&")); | ||
961 | if (file->f_flags & O_NONBLOCK) | 1128 | if (file->f_flags & O_NONBLOCK) |
962 | { | ||
963 | return -EAGAIN; | 1129 | return -EAGAIN; |
964 | } | ||
965 | 1130 | ||
966 | interruptible_sleep_on(&port->in_wait_q); | 1131 | interruptible_sleep_on(&port->in_wait_q); |
967 | if (signal_pending(current)) | 1132 | if (signal_pending(current)) |
968 | { | ||
969 | return -EINTR; | 1133 | return -EINTR; |
970 | } | 1134 | |
971 | spin_lock_irqsave(&port->lock, flags); | 1135 | spin_lock_irqsave(&port->lock, flags); |
972 | start = (unsigned char*)port->readp; /* cast away volatile */ | 1136 | start = (unsigned char*)port->readp; /* cast away volatile */ |
973 | end = (unsigned char*)port->writep; /* cast away volatile */ | 1137 | end = (unsigned char*)port->writep; /* cast away volatile */ |
@@ -1004,83 +1168,105 @@ static void send_word(sync_port* port) | |||
1004 | switch(tr_cfg.sample_size) | 1168 | switch(tr_cfg.sample_size) |
1005 | { | 1169 | { |
1006 | case 8: | 1170 | case 8: |
1007 | port->out_count--; | 1171 | port->out_buf_count--; |
1008 | tr_data.data = *port->outp++; | 1172 | tr_data.data = *port->out_rd_ptr++; |
1009 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); | 1173 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); |
1010 | if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE) | 1174 | if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) |
1011 | port->outp = port->out_buffer; | 1175 | port->out_rd_ptr = port->out_buffer; |
1012 | break; | 1176 | break; |
1013 | case 12: | 1177 | case 12: |
1014 | { | 1178 | { |
1015 | int data = (*port->outp++) << 8; | 1179 | int data = (*port->out_rd_ptr++) << 8; |
1016 | data |= *port->outp++; | 1180 | data |= *port->out_rd_ptr++; |
1017 | port->out_count-=2; | 1181 | port->out_buf_count -= 2; |
1018 | tr_data.data = data; | 1182 | tr_data.data = data; |
1019 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); | 1183 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); |
1020 | if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE) | 1184 | if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) |
1021 | port->outp = port->out_buffer; | 1185 | port->out_rd_ptr = port->out_buffer; |
1022 | } | 1186 | } |
1023 | break; | 1187 | break; |
1024 | case 16: | 1188 | case 16: |
1025 | port->out_count-=2; | 1189 | port->out_buf_count -= 2; |
1026 | tr_data.data = *(unsigned short *)port->outp; | 1190 | tr_data.data = *(unsigned short *)port->out_rd_ptr; |
1027 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); | 1191 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); |
1028 | port->outp+=2; | 1192 | port->out_rd_ptr += 2; |
1029 | if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE) | 1193 | if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) |
1030 | port->outp = port->out_buffer; | 1194 | port->out_rd_ptr = port->out_buffer; |
1031 | break; | 1195 | break; |
1032 | case 24: | 1196 | case 24: |
1033 | port->out_count-=3; | 1197 | port->out_buf_count -= 3; |
1034 | tr_data.data = *(unsigned short *)port->outp; | 1198 | tr_data.data = *(unsigned short *)port->out_rd_ptr; |
1035 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); | 1199 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); |
1036 | port->outp+=2; | 1200 | port->out_rd_ptr += 2; |
1037 | tr_data.data = *port->outp++; | 1201 | tr_data.data = *port->out_rd_ptr++; |
1038 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); | 1202 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); |
1039 | if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE) | 1203 | if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) |
1040 | port->outp = port->out_buffer; | 1204 | port->out_rd_ptr = port->out_buffer; |
1041 | break; | 1205 | break; |
1042 | case 32: | 1206 | case 32: |
1043 | port->out_count-=4; | 1207 | port->out_buf_count -= 4; |
1044 | tr_data.data = *(unsigned short *)port->outp; | 1208 | tr_data.data = *(unsigned short *)port->out_rd_ptr; |
1045 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); | 1209 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); |
1046 | port->outp+=2; | 1210 | port->out_rd_ptr += 2; |
1047 | tr_data.data = *(unsigned short *)port->outp; | 1211 | tr_data.data = *(unsigned short *)port->out_rd_ptr; |
1048 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); | 1212 | REG_WR(sser, port->regi_sser, rw_tr_data, tr_data); |
1049 | port->outp+=2; | 1213 | port->out_rd_ptr += 2; |
1050 | if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE) | 1214 | if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE) |
1051 | port->outp = port->out_buffer; | 1215 | port->out_rd_ptr = port->out_buffer; |
1052 | break; | 1216 | break; |
1053 | } | 1217 | } |
1054 | } | 1218 | } |
1055 | 1219 | ||
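In manual (non-DMA) mode, send_word() above is called once per transmitter-ready interrupt and pops one sample from the circular output buffer: depending on tr_cfg.sample_size it consumes one to four bytes, writes them to rw_tr_data, and wraps out_rd_ptr when it reaches the end of OUT_BUFFER_SIZE. A small sketch of the 16-bit case follows; write_tr_data() is a hypothetical stand-in for the REG_WR() to rw_tr_data, and the sample is assembled explicitly rather than via the driver's unsigned short load.

#include <stdint.h>

#define OUT_BUFFER_SIZE (1024 * 8)

static unsigned char out_buffer[OUT_BUFFER_SIZE];
static unsigned char *out_rd_ptr = out_buffer;
static int out_buf_count;

/* Placeholder for the hardware write to rw_tr_data. */
static void write_tr_data(uint16_t sample) { (void)sample; }

static void send_word_16bit(void)
{
	uint16_t sample;

	/* Two bytes leave the software FIFO per 16-bit sample. */
	out_buf_count -= 2;
	sample = (uint16_t)(out_rd_ptr[0] | (out_rd_ptr[1] << 8));
	write_tr_data(sample);
	out_rd_ptr += 2;

	/* Wrap the read pointer at the end of the circular buffer. */
	if (out_rd_ptr >= out_buffer + OUT_BUFFER_SIZE)
		out_rd_ptr = out_buffer;
}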
1056 | 1220 | static void start_dma_out(struct sync_port *port, | |
1057 | static void start_dma(struct sync_port* port, const char* data, int count) | 1221 | const char *data, int count) |
1058 | { | 1222 | { |
1059 | port->tr_running = 1; | 1223 | port->active_tr_descr->buf = (char *) virt_to_phys((char *) data); |
1060 | port->out_descr.buf = (char*)virt_to_phys((char*)data); | 1224 | port->active_tr_descr->after = port->active_tr_descr->buf + count; |
1061 | port->out_descr.after = port->out_descr.buf + count; | 1225 | port->active_tr_descr->intr = 1; |
1062 | port->out_descr.eol = port->out_descr.intr = 1; | 1226 | |
1227 | port->active_tr_descr->eol = 1; | ||
1228 | port->prev_tr_descr->eol = 0; | ||
1229 | |||
1230 | DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n", | ||
1231 | port->prev_tr_descr, port->active_tr_descr)); | ||
1232 | port->prev_tr_descr = port->active_tr_descr; | ||
1233 | port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next); | ||
1234 | |||
1235 | if (!port->tr_running) { | ||
1236 | reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, | ||
1237 | rw_tr_cfg); | ||
1063 | 1238 | ||
1064 | port->out_context.saved_data = (dma_descr_data*)virt_to_phys(&port->out_descr); | 1239 | port->out_context.next = 0; |
1065 | port->out_context.saved_data_buf = port->out_descr.buf; | 1240 | port->out_context.saved_data = |
1241 | (dma_descr_data *)virt_to_phys(port->prev_tr_descr); | ||
1242 | port->out_context.saved_data_buf = port->prev_tr_descr->buf; | ||
1243 | |||
1244 | DMA_START_CONTEXT(port->regi_dmaout, | ||
1245 | virt_to_phys((char *)&port->out_context)); | ||
1246 | |||
1247 | tr_cfg.tr_en = regk_sser_yes; | ||
1248 | REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); | ||
1249 | DEBUGTRDMA(printk(KERN_DEBUG "dma s\n");); | ||
1250 | } else { | ||
1251 | DMA_CONTINUE_DATA(port->regi_dmaout); | ||
1252 | DEBUGTRDMA(printk(KERN_DEBUG "dma c\n");); | ||
1253 | } | ||
1066 | 1254 | ||
1067 | DMA_START_CONTEXT(port->regi_dmaout, virt_to_phys((char*)&port->out_context)); | 1255 | port->tr_running = 1; |
1068 | DEBUGTXINT(printk("dma %08lX c %d\n", (unsigned long)data, count)); | ||
1069 | } | 1256 | } |
1070 | 1257 | ||
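start_dma_out() grows the transmit descriptor ring by one entry per write: the next free descriptor gets the buffer span and becomes the new end-of-list, the previous EOL is cleared, and the DMA channel is either handed a fresh context (first transfer, tr_en enabled) or told to continue past the old EOL with DMA_CONTINUE_DATA. The sketch below models just the ring-append step; struct descr and the dma_running flag are hypothetical stand-ins for dma_descr_data and port->tr_running, and addresses are kept virtual instead of going through virt_to_phys()/phys_to_virt().

/* Simplified model of one transmit descriptor in the ring. */
struct descr {
	char *buf;		/* first byte of payload */
	char *after;		/* one past the last byte */
	int intr;		/* raise an interrupt when consumed */
	int eol;		/* end-of-list marker */
	struct descr *next;	/* circular link */
};

static void ring_append(struct descr **active, struct descr **prev,
			char *data, int count, int *dma_running)
{
	struct descr *d = *active;

	d->buf = data;
	d->after = data + count;
	d->intr = 1;

	/* The new descriptor becomes the end of the list; the old EOL
	 * is cleared so the DMA may run past it. */
	d->eol = 1;
	(*prev)->eol = 0;

	*prev = d;
	*active = d->next;

	if (!*dma_running) {
		/* In the driver this is where the DMA context is loaded
		 * and tr_en is enabled; here we only mark it running. */
		*dma_running = 1;
	} else {
		/* The driver issues DMA_CONTINUE_DATA here so the channel
		 * picks up the extended list. */
	}
}

Keeping exactly one descriptor marked EOL at all times is what lets the transmit interrupt handler further down tell "DMA parked at the end of the list" apart from "DMA still has descriptors to process".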
1071 | static void start_dma_in(sync_port* port) | 1258 | static void start_dma_in(sync_port *port) |
1072 | { | 1259 | { |
1073 | int i; | 1260 | int i; |
1074 | char* buf; | 1261 | char *buf; |
1075 | port->writep = port->flip; | 1262 | port->writep = port->flip; |
1076 | 1263 | ||
1077 | if (port->writep > port->flip + port->in_buffer_size) | 1264 | if (port->writep > port->flip + port->in_buffer_size) { |
1078 | { | ||
1079 | panic("Offset too large in sync serial driver\n"); | 1265 | panic("Offset too large in sync serial driver\n"); |
1080 | return; | 1266 | return; |
1081 | } | 1267 | } |
1082 | buf = (char*)virt_to_phys(port->in_buffer); | 1268 | buf = (char*)virt_to_phys(port->in_buffer); |
1083 | for (i = 0; i < NUM_IN_DESCR; i++) { | 1269 | for (i = 0; i < NBR_IN_DESCR; i++) { |
1084 | port->in_descr[i].buf = buf; | 1270 | port->in_descr[i].buf = buf; |
1085 | port->in_descr[i].after = buf + port->inbufchunk; | 1271 | port->in_descr[i].after = buf + port->inbufchunk; |
1086 | port->in_descr[i].intr = 1; | 1272 | port->in_descr[i].intr = 1; |
@@ -1092,59 +1278,126 @@ static void start_dma_in(sync_port* port) | |||
1092 | port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); | 1278 | port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); |
1093 | port->in_descr[i-1].eol = regk_sser_yes; | 1279 | port->in_descr[i-1].eol = regk_sser_yes; |
1094 | port->next_rx_desc = &port->in_descr[0]; | 1280 | port->next_rx_desc = &port->in_descr[0]; |
1095 | port->prev_rx_desc = &port->in_descr[NUM_IN_DESCR - 1]; | 1281 | port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1]; |
1096 | port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); | 1282 | port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]); |
1097 | port->in_context.saved_data_buf = port->in_descr[0].buf; | 1283 | port->in_context.saved_data_buf = port->in_descr[0].buf; |
1098 | DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context)); | 1284 | DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context)); |
1099 | } | 1285 | } |
1100 | 1286 | ||
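start_dma_in() carves in_buffer into NBR_IN_DESCR fixed-size chunks and links the descriptors into a closed ring, with the last one pointing back at the first and carrying the initial EOL. A sketch of that setup is below, using a simplified descriptor type and plain virtual addresses (the real code converts every link and buffer pointer with virt_to_phys()).

#define IN_BUFFER_SIZE 12288
#define IN_DESCR_SIZE 256
#define NBR_IN_DESCR (IN_BUFFER_SIZE / IN_DESCR_SIZE)

/* Simplified receive descriptor; not the driver's dma_descr_data. */
struct rx_descr {
	char *buf;
	char *after;
	int intr;
	int eol;
	struct rx_descr *next;
};

static void build_rx_ring(struct rx_descr *ring, char *in_buffer, int inbufchunk)
{
	int i;
	char *buf = in_buffer;

	for (i = 0; i < NBR_IN_DESCR; i++) {
		ring[i].buf = buf;
		ring[i].after = buf + inbufchunk;
		ring[i].intr = 1;
		ring[i].eol = 0;
		ring[i].next = &ring[i + 1];
		buf += inbufchunk;
	}
	/* Close the ring: the last descriptor links back to the first
	 * and is the initial end-of-list. */
	ring[NBR_IN_DESCR - 1].next = &ring[0];
	ring[NBR_IN_DESCR - 1].eol = 1;
}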
1101 | #ifdef SYNC_SER_DMA | 1287 | #ifdef SYNC_SER_DMA |
1102 | static irqreturn_t tr_interrupt(int irq, void *dev_id, struct pt_regs * regs) | 1288 | static irqreturn_t tr_interrupt(int irq, void *dev_id) |
1103 | { | 1289 | { |
1104 | reg_dma_r_masked_intr masked; | 1290 | reg_dma_r_masked_intr masked; |
1105 | reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; | 1291 | reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; |
1292 | reg_dma_rw_stat stat; | ||
1106 | int i; | 1293 | int i; |
1107 | struct dma_descr_data *descr; | ||
1108 | unsigned int sentl; | ||
1109 | int found = 0; | 1294 | int found = 0; |
1295 | int stop_sser = 0; | ||
1110 | 1296 | ||
1111 | for (i = 0; i < NUMBER_OF_PORTS; i++) | 1297 | for (i = 0; i < NBR_PORTS; i++) { |
1112 | { | ||
1113 | sync_port *port = &ports[i]; | 1298 | sync_port *port = &ports[i]; |
1114 | if (!port->enabled || !port->use_dma ) | 1299 | if (!port->enabled || !port->use_dma) |
1115 | continue; | 1300 | continue; |
1116 | 1301 | ||
1302 | /* IRQ active for the port? */ | ||
1117 | masked = REG_RD(dma, port->regi_dmaout, r_masked_intr); | 1303 | masked = REG_RD(dma, port->regi_dmaout, r_masked_intr); |
1304 | if (!masked.data) | ||
1305 | continue; | ||
1118 | 1306 | ||
1119 | if (masked.data) /* IRQ active for the port? */ | 1307 | found = 1; |
1120 | { | 1308 | |
1121 | found = 1; | 1309 | /* Check if we should stop the DMA transfer */ |
1122 | /* Clear IRQ */ | 1310 | stat = REG_RD(dma, port->regi_dmaout, rw_stat); |
1123 | REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr); | 1311 | if (stat.list_state == regk_dma_data_at_eol) |
1124 | descr = &port->out_descr; | 1312 | stop_sser = 1; |
1125 | sentl = descr->after - descr->buf; | 1313 | |
1126 | port->out_count -= sentl; | 1314 | /* Clear IRQ */ |
1127 | port->outp += sentl; | 1315 | REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr); |
1128 | if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE) | 1316 | |
1129 | port->outp = port->out_buffer; | 1317 | if (!stop_sser) { |
1130 | if (port->out_count) { | 1318 | /* The DMA has completed a descriptor; EOL was not |
1131 | int c; | 1319 | * encountered, so step the relevant descriptor and |
1132 | c = port->out_buffer + OUT_BUFFER_SIZE - port->outp; | 1320 | * data pointers forward. */ |
1133 | if (c > port->out_count) | 1321 | int sent; |
1134 | c = port->out_count; | 1322 | sent = port->catch_tr_descr->after - |
1135 | DEBUGTXINT(printk("tx_int DMAWRITE %i %i\n", sentl, c)); | 1323 | port->catch_tr_descr->buf; |
1136 | start_dma(port, port->outp, c); | 1324 | DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t" |
1137 | } else { | 1325 | "in descr %p (ac: %p)\n", |
1138 | DEBUGTXINT(printk("tx_int DMA stop %i\n", sentl)); | 1326 | port->out_buf_count, sent, |
1139 | port->tr_running = 0; | 1327 | port->out_buf_count - sent, |
1328 | port->catch_tr_descr, | ||
1329 | port->active_tr_descr);); | ||
1330 | port->out_buf_count -= sent; | ||
1331 | port->catch_tr_descr = | ||
1332 | phys_to_virt((int) port->catch_tr_descr->next); | ||
1333 | port->out_rd_ptr = | ||
1334 | phys_to_virt((int) port->catch_tr_descr->buf); | ||
1335 | } else { | ||
1336 | int i, sent; | ||
1337 | /* EOL handler. | ||
1338 | * Note that if an EOL was encountered during the irq | ||
1339 | * locked section of sync_ser_write, the DMA will be |||
1340 | * restarted and the eol flag will be cleared. | ||
1341 | * The remaining descriptors will be traversed by | ||
1342 | * the descriptor interrupts as usual. | ||
1343 | */ | ||
1344 | i = 0; | ||
1345 | while (!port->catch_tr_descr->eol) { | ||
1346 | sent = port->catch_tr_descr->after - | ||
1347 | port->catch_tr_descr->buf; | ||
1348 | DEBUGOUTBUF(printk(KERN_DEBUG | ||
1349 | "traversing descr %p -%d (%d)\n", | ||
1350 | port->catch_tr_descr, | ||
1351 | sent, | ||
1352 | port->out_buf_count)); | ||
1353 | port->out_buf_count -= sent; | ||
1354 | port->catch_tr_descr = phys_to_virt( | ||
1355 | (int)port->catch_tr_descr->next); | ||
1356 | i++; | ||
1357 | if (i >= NBR_OUT_DESCR) { | ||
1358 | /* TODO: Reset and recover */ | ||
1359 | panic("sync_serial: missing eol"); | ||
1360 | } | ||
1140 | } | 1361 | } |
1141 | wake_up_interruptible(&port->out_wait_q); /* wake up the waiting process */ | 1362 | sent = port->catch_tr_descr->after - |
1363 | port->catch_tr_descr->buf; | ||
1364 | DEBUGOUTBUF(printk(KERN_DEBUG | ||
1365 | "eol at descr %p -%d (%d)\n", | ||
1366 | port->catch_tr_descr, | ||
1367 | sent, | ||
1368 | port->out_buf_count)); | ||
1369 | |||
1370 | port->out_buf_count -= sent; | ||
1371 | |||
1372 | /* Update the read pointer to the first free byte; we |||
1373 | * may already be writing data there. */ | ||
1374 | port->out_rd_ptr = | ||
1375 | phys_to_virt((int) port->catch_tr_descr->after); | ||
1376 | if (port->out_rd_ptr > port->out_buffer + | ||
1377 | OUT_BUFFER_SIZE) | ||
1378 | port->out_rd_ptr = port->out_buffer; | ||
1379 | |||
1380 | reg_sser_rw_tr_cfg tr_cfg = | ||
1381 | REG_RD(sser, port->regi_sser, rw_tr_cfg); | ||
1382 | DEBUGTXINT(printk(KERN_DEBUG | ||
1383 | "tr_int DMA stop %d, set catch @ %p\n", | ||
1384 | port->out_buf_count, | ||
1385 | port->active_tr_descr)); | ||
1386 | if (port->out_buf_count != 0) | ||
1387 | printk(KERN_CRIT "sync_ser: buffer not " | ||
1388 | "empty after eol.\n"); | ||
1389 | port->catch_tr_descr = port->active_tr_descr; | ||
1390 | port->tr_running = 0; | ||
1391 | tr_cfg.tr_en = regk_sser_no; | ||
1392 | REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg); | ||
1142 | } | 1393 | } |
1394 | /* wake up the waiting process */ | ||
1395 | wake_up_interruptible(&port->out_wait_q); | ||
1143 | } | 1396 | } |
1144 | return IRQ_RETVAL(found); | 1397 | return IRQ_RETVAL(found); |
1145 | } /* tr_interrupt */ | 1398 | } /* tr_interrupt */ |
1146 | 1399 | ||
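The transmit interrupt above does its accounting through catch_tr_descr: each serviced interrupt credits the bytes of one consumed descriptor back to out_buf_count and advances the catch pointer, and when the channel has parked at EOL the handler drains whatever descriptors remain (panicking if no EOL turns up within NBR_OUT_DESCR steps), resets out_rd_ptr, and disables the transmitter. A sketch of the drain-to-EOL walk over a simplified descriptor type, with an error return instead of panic() and no register access:

#define NBR_OUT_DESCR 8

/* Simplified transmit descriptor; not the driver's dma_descr_data. */
struct tx_descr {
	char *buf;
	char *after;
	int eol;
	struct tx_descr *next;
};

/* Walk from *cur up to (and including) the EOL descriptor, crediting
 * the bytes each one carried back to out_buf_count.  Returns 0 on
 * success, -1 if no EOL is found within one lap of the ring (the
 * driver panics in that case). */
static int drain_to_eol(struct tx_descr **cur, int *out_buf_count)
{
	int i = 0;

	while (!(*cur)->eol) {
		*out_buf_count -= (int)((*cur)->after - (*cur)->buf);
		*cur = (*cur)->next;
		if (++i >= NBR_OUT_DESCR)
			return -1;	/* missing EOL: ring is corrupt */
	}
	/* Account for the EOL descriptor itself. */
	*out_buf_count -= (int)((*cur)->after - (*cur)->buf);
	return 0;
}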
1147 | static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs) | 1400 | static irqreturn_t rx_interrupt(int irq, void *dev_id) |
1148 | { | 1401 | { |
1149 | reg_dma_r_masked_intr masked; | 1402 | reg_dma_r_masked_intr masked; |
1150 | reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; | 1403 | reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes}; |
@@ -1152,7 +1405,7 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
1152 | int i; | 1405 | int i; |
1153 | int found = 0; | 1406 | int found = 0; |
1154 | 1407 | ||
1155 | for (i = 0; i < NUMBER_OF_PORTS; i++) | 1408 | for (i = 0; i < NBR_PORTS; i++) |
1156 | { | 1409 | { |
1157 | sync_port *port = &ports[i]; | 1410 | sync_port *port = &ports[i]; |
1158 | 1411 | ||
@@ -1166,7 +1419,7 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
1166 | found = 1; | 1419 | found = 1; |
1167 | while (REG_RD(dma, port->regi_dmain, rw_data) != | 1420 | while (REG_RD(dma, port->regi_dmain, rw_data) != |
1168 | virt_to_phys(port->next_rx_desc)) { | 1421 | virt_to_phys(port->next_rx_desc)) { |
1169 | 1422 | DEBUGRXINT(printk(KERN_DEBUG "!")); | |
1170 | if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) { | 1423 | if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) { |
1171 | int first_size = port->flip + port->in_buffer_size - port->writep; | 1424 | int first_size = port->flip + port->in_buffer_size - port->writep; |
1172 | memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size); | 1425 | memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size); |
@@ -1185,11 +1438,16 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
1185 | port->full = 1; | 1438 | port->full = 1; |
1186 | } | 1439 | } |
1187 | 1440 | ||
1188 | port->next_rx_desc->eol = 0; | 1441 | port->next_rx_desc->eol = 1; |
1189 | port->prev_rx_desc->eol = 1; | 1442 | port->prev_rx_desc->eol = 0; |
1190 | port->prev_rx_desc = phys_to_virt((unsigned)port->next_rx_desc); | 1443 | /* Cache bug workaround */ |
1444 | flush_dma_descr(port->prev_rx_desc, 0); | ||
1445 | port->prev_rx_desc = port->next_rx_desc; | ||
1191 | port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next); | 1446 | port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next); |
1192 | wake_up_interruptible(&port->in_wait_q); /* wake up the waiting process */ | 1447 | /* Cache bug workaround */ |
1448 | flush_dma_descr(port->prev_rx_desc, 1); | ||
1449 | /* wake up the waiting process */ | ||
1450 | wake_up_interruptible(&port->in_wait_q); | ||
1193 | DMA_CONTINUE(port->regi_dmain); | 1451 | DMA_CONTINUE(port->regi_dmain); |
1194 | REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr); | 1452 | REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr); |
1195 | 1453 | ||
@@ -1201,15 +1459,15 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs) | |||
1201 | #endif /* SYNC_SER_DMA */ | 1459 | #endif /* SYNC_SER_DMA */ |
1202 | 1460 | ||
1203 | #ifdef SYNC_SER_MANUAL | 1461 | #ifdef SYNC_SER_MANUAL |
1204 | static irqreturn_t manual_interrupt(int irq, void *dev_id, struct pt_regs * regs) | 1462 | static irqreturn_t manual_interrupt(int irq, void *dev_id) |
1205 | { | 1463 | { |
1206 | int i; | 1464 | int i; |
1207 | int found = 0; | 1465 | int found = 0; |
1208 | reg_sser_r_masked_intr masked; | 1466 | reg_sser_r_masked_intr masked; |
1209 | 1467 | ||
1210 | for (i = 0; i < NUMBER_OF_PORTS; i++) | 1468 | for (i = 0; i < NBR_PORTS; i++) |
1211 | { | 1469 | { |
1212 | sync_port* port = &ports[i]; | 1470 | sync_port *port = &ports[i]; |
1213 | 1471 | ||
1214 | if (!port->enabled || port->use_dma) | 1472 | if (!port->enabled || port->use_dma) |
1215 | { | 1473 | { |
@@ -1263,7 +1521,7 @@ static irqreturn_t manual_interrupt(int irq, void *dev_id, struct pt_regs * regs | |||
1263 | if (masked.trdy) /* Transmitter ready? */ | 1521 | if (masked.trdy) /* Transmitter ready? */ |
1264 | { | 1522 | { |
1265 | found = 1; | 1523 | found = 1; |
1266 | if (port->out_count > 0) /* More data to send */ | 1524 | if (port->out_buf_count > 0) /* More data to send */ |
1267 | send_word(port); | 1525 | send_word(port); |
1268 | else /* transmission finished */ | 1526 | else /* transmission finished */ |
1269 | { | 1527 | { |