author    Russell King <rmk+kernel@arm.linux.org.uk>    2012-01-08 11:16:39 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>    2012-02-09 10:37:55 -0500
commit    3d26db137ac3169623a132ea310d26af6a48bf88 (patch)
tree      6cb6c775b1011f35d0d8038d840bcbadfbb19e17 /drivers/net/irda
parent    0e888ee31566c3f5071474ddd68457a7ad2ae5ac (diff)
NET: sa11x0-ir: split SIR and FIR tx functions
Split the SIR and FIR transmit functions, as they behave differently.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
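In practical terms, the patch replaces the IS_FIR() branch inside the hard transmit routine with a tx_start function pointer that is chosen once in sa1100_irda_set_speed(): SIR rates (9600..115200) get the UART/async-wrap handler, the 4 Mbps FIR rate gets the DMA handler. The snippet below is only a condensed, standalone sketch of that dispatch pattern, with simplified stand-in types and hypothetical mock handler bodies; it is not the driver source, which follows in the diff.

#include <stdio.h>

/* Simplified stand-ins for the driver's frame and device state. */
struct frame { int len; };

struct ir_dev {
        int speed;
        /* Mirrors the tx_start member added to struct sa1100_irda:
         * one handler for SIR (UART-wrapped) and one for FIR (DMA). */
        int (*tx_start)(struct frame *, struct ir_dev *);
};

static int sir_tx_start(struct frame *f, struct ir_dev *d)
{
        printf("SIR: wrap %d bytes and enable the UART TX interrupt\n", f->len);
        return 0;
}

static int fir_tx_start(struct frame *f, struct ir_dev *d)
{
        printf("FIR: map %d bytes for DMA and kick the HSSP transmitter\n", f->len);
        return 0;
}

/* Analogous to sa1100_irda_set_speed(): pick the handler once when the
 * speed changes, so the hot transmit path needs no IS_FIR() test. */
static void set_speed(struct ir_dev *d, int speed)
{
        d->speed = speed;
        d->tx_start = (speed <= 115200) ? sir_tx_start : fir_tx_start;
}

/* Analogous to sa1100_irda_hard_xmit(): a single indirect call. */
static int hard_xmit(struct frame *f, struct ir_dev *d)
{
        return d->tx_start(f, d);
}

int main(void)
{
        struct ir_dev dev;
        struct frame f = { .len = 64 };

        set_speed(&dev, 9600);          /* SIR rate */
        hard_xmit(&f, &dev);

        set_speed(&dev, 4000000);       /* FIR rate */
        hard_xmit(&f, &dev);
        return 0;
}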
Diffstat (limited to 'drivers/net/irda')
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 118 +++++++++++++++++++++++--------------------
1 file changed, 66 insertions(+), 52 deletions(-)
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index bfae34fa7a9e..8b8cf4152604 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -68,6 +68,8 @@ struct sa1100_irda {
 
         iobuff_t tx_buff;
         iobuff_t rx_buff;
+
+        int (*tx_start)(struct sk_buff *, struct net_device *, struct sa1100_irda *);
 };
 
 static int sa1100_irda_set_speed(struct sa1100_irda *, int);
@@ -140,6 +142,63 @@ static void sa1100_irda_check_speed(struct sa1100_irda *si)
 }
 
 /*
+ * HP-SIR format support.
+ */
+static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
+        struct sa1100_irda *si)
+{
+        si->tx_buff.data = si->tx_buff.head;
+        si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
+                                         si->tx_buff.truesize);
+
+        /*
+         * Set the transmit interrupt enable.  This will fire off an
+         * interrupt immediately.  Note that we disable the receiver
+         * so we won't get spurious characters received.
+         */
+        Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
+
+        dev_kfree_skb(skb);
+
+        return NETDEV_TX_OK;
+}
+
+/*
+ * FIR format support.
+ */
+static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev,
+        struct sa1100_irda *si)
+{
+        int mtt = irda_get_mtt(skb);
+
+        si->dma_tx.skb = skb;
+        si->dma_tx.dma = dma_map_single(si->dev, skb->data, skb->len,
+                                        DMA_TO_DEVICE);
+        if (dma_mapping_error(si->dev, si->dma_tx.dma)) {
+                si->dma_tx.skb = NULL;
+                netif_wake_queue(dev);
+                dev->stats.tx_dropped++;
+                dev_kfree_skb(skb);
+                return NETDEV_TX_OK;
+        }
+
+        sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len);
+
+        /*
+         * If we have a mean turn-around time, impose the specified
+         * specified delay.  We could shorten this by timing from
+         * the point we received the packet.
+         */
+        if (mtt)
+                udelay(mtt);
+
+        Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
+
+        return NETDEV_TX_OK;
+}
+
+
+/*
  * Set the IrDA communications speed.
  */
 static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
@@ -176,6 +235,7 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
                         si->pdata->set_speed(si->dev, speed);
 
                 si->speed = speed;
+                si->tx_start = sa1100_irda_sir_tx_start;
 
                 local_irq_restore(flags);
                 ret = 0;
@@ -191,6 +251,7 @@ static int sa1100_irda_set_speed(struct sa1100_irda *si, int speed)
                 Ser2UTCR3 = 0;
 
                 si->speed = speed;
+                si->tx_start = sa1100_irda_fir_tx_start;
 
                 if (si->pdata->set_speed)
                         si->pdata->set_speed(si->dev, speed);
@@ -538,66 +599,19 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
         if (speed != si->speed && speed != -1)
                 si->newspeed = speed;
 
-        /*
-         * If this is an empty frame, we can bypass a lot.
-         */
+        /* If this is an empty frame, we can bypass a lot. */
         if (skb->len == 0) {
                 sa1100_irda_check_speed(si);
                 dev_kfree_skb(skb);
                 return NETDEV_TX_OK;
         }
 
-        if (!IS_FIR(si)) {
-                netif_stop_queue(dev);
-
-                si->tx_buff.data = si->tx_buff.head;
-                si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
-                                        si->tx_buff.truesize);
-
-                /*
-                 * Set the transmit interrupt enable.  This will fire
-                 * off an interrupt immediately.  Note that we disable
-                 * the receiver so we won't get spurious characteres
-                 * received.
-                 */
-                Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
-
-                dev_kfree_skb(skb);
-        } else {
-                int mtt = irda_get_mtt(skb);
-
-                /*
-                 * We must not be transmitting...
-                 */
-                BUG_ON(si->dma_tx.skb);
-
-                netif_stop_queue(dev);
-
-                si->dma_tx.skb = skb;
-                si->dma_tx.dma = dma_map_single(si->dev, skb->data,
-                                        skb->len, DMA_TO_DEVICE);
-                if (dma_mapping_error(si->dev, si->dma_tx.dma)) {
-                        si->dma_tx.skb = NULL;
-                        netif_wake_queue(dev);
-                        dev->stats.tx_dropped++;
-                        dev_kfree_skb(skb);
-                        return NETDEV_TX_OK;
-                }
-
-                sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len);
-
-                /*
-                 * If we have a mean turn-around time, impose the specified
-                 * specified delay.  We could shorten this by timing from
-                 * the point we received the packet.
-                 */
-                if (mtt)
-                        udelay(mtt);
+        netif_stop_queue(dev);
 
-                Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
-        }
+        /* We must not already have a skb to transmit... */
+        BUG_ON(si->dma_tx.skb);
 
-        return NETDEV_TX_OK;
+        return si->tx_start(skb, dev, si);
 }
 
 static int