about summary refs log tree commit diff stats
path: root/drivers/net/irda
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2012-01-13 06:50:03 -0500
committerRussell King <rmk+kernel@arm.linux.org.uk>2012-03-07 06:46:19 -0500
commitd138dacb4b8255c02e4380ce2aadab758a99d2c1 (patch)
treee51c5a8d74d96b9773cf418549e9f006b16342d8 /drivers/net/irda
parentbf95154ff6c84e04afd9ba7f2b54a4628beefdb9 (diff)
NET: sa11x0-ir: add DMA support for SIR transmit mode
As the DMA engine API allows DMA channels to be reconfigured on the fly, we can now support switching the DMA channel configuration to support SIR transmit DMA without needing to claim an additional physical DMA channel - thereby using up half the DMA channels just for one driver. Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'drivers/net/irda')
-rw-r--r--	drivers/net/irda/sa1100_ir.c	103
1 file changed, 59 insertions(+), 44 deletions(-)
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 620a48d00e2b..a0d1913a58d3 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -81,6 +81,13 @@ static int sa1100_irda_set_speed(struct sa1100_irda *, int);
 
 #define HPSIR_MAX_RXLEN		2047
 
+static struct dma_slave_config sa1100_irda_sir_tx = {
+	.direction	= DMA_TO_DEVICE,
+	.dst_addr	= __PREG(Ser2UTDR),
+	.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
+	.dst_maxburst	= 4,
+};
+
 static struct dma_slave_config sa1100_irda_fir_rx = {
 	.direction	= DMA_FROM_DEVICE,
 	.src_addr	= __PREG(Ser2HSDR),
@@ -215,6 +222,36 @@ static void sa1100_irda_check_speed(struct sa1100_irda *si)
 /*
  * HP-SIR format support.
  */
+static void sa1100_irda_sirtxdma_irq(void *id)
+{
+	struct net_device *dev = id;
+	struct sa1100_irda *si = netdev_priv(dev);
+
+	dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE);
+	dev_kfree_skb(si->dma_tx.skb);
+	si->dma_tx.skb = NULL;
+
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg);
+
+	/* We need to ensure that the transmitter has finished. */
+	do
+		rmb();
+	while (Ser2UTSR1 & UTSR1_TBY);
+
+	/*
+	 * Ok, we've finished transmitting.  Now enable the receiver.
+	 * Sometimes we get a receive IRQ immediately after a transmit...
+	 */
+	Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+	Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+	sa1100_irda_check_speed(si);
+
+	/* I'm hungry! */
+	netif_wake_queue(dev);
+}
+
 static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
 				    struct sa1100_irda *si)
 {
@@ -222,14 +259,22 @@ static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
 	si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
 					 si->tx_buff.truesize);
 
+	si->dma_tx.skb = skb;
+	sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len);
+	if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
+		si->dma_tx.skb = NULL;
+		netif_wake_queue(dev);
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV, sa1100_irda_sirtxdma_irq, dev);
+
 	/*
-	 * Set the transmit interrupt enable.  This will fire off an
-	 * interrupt immediately.  Note that we disable the receiver
-	 * so we won't get spurious characters received.
+	 * The mean turn-around time is enforced by XBOF padding,
+	 * so we don't have to do anything special here.
 	 */
-	Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
-
-	dev_kfree_skb(skb);
+	Ser2UTCR3 = UTCR3_TXE;
 
 	return NETDEV_TX_OK;
 }
@@ -288,43 +333,6 @@ static irqreturn_t sa1100_irda_sir_irq(struct net_device *dev, struct sa1100_ird
 
 	}
 
-	if (status & UTSR0_TFS && si->tx_buff.len) {
-		/*
-		 * Transmitter FIFO is not full
-		 */
-		do {
-			Ser2UTDR = *si->tx_buff.data++;
-			si->tx_buff.len -= 1;
-		} while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
-
-		if (si->tx_buff.len == 0) {
-			dev->stats.tx_packets++;
-			dev->stats.tx_bytes += si->tx_buff.data -
-					       si->tx_buff.head;
-
-			/*
-			 * We need to ensure that the transmitter has
-			 * finished.
-			 */
-			do
-				rmb();
-			while (Ser2UTSR1 & UTSR1_TBY);
-
-			/*
-			 * Ok, we've finished transmitting.  Now enable
-			 * the receiver.  Sometimes we get a receive IRQ
-			 * immediately after a transmit...
-			 */
-			Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
-			Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
-
-			sa1100_irda_check_speed(si);
-
-			/* I'm hungry! */
-			netif_wake_queue(dev);
-		}
-	}
-
 	return IRQ_HANDLED;
 }
 
@@ -545,8 +553,11 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
 	brd = 3686400 / (16 * speed) - 1;
 
 	/* Stop the receive DMA, and configure transmit. */
-	if (IS_FIR(si))
+	if (IS_FIR(si)) {
 		dmaengine_terminate_all(si->dma_rx.chan);
+		dmaengine_slave_config(si->dma_tx.chan,
+				       &sa1100_irda_sir_tx);
+	}
 
 	local_irq_save(flags);
 
@@ -574,6 +585,10 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
574 break; 585 break;
575 586
576 case 4000000: 587 case 4000000:
588 if (!IS_FIR(si))
589 dmaengine_slave_config(si->dma_tx.chan,
590 &sa1100_irda_fir_tx);
591
577 local_irq_save(flags); 592 local_irq_save(flags);
578 593
579 Ser2HSSR0 = 0xff; 594 Ser2HSSR0 = 0xff;