about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@arm.linux.org.uk>2012-01-12 08:56:28 -0500
committerRussell King <rmk+kernel@arm.linux.org.uk>2012-02-09 10:37:57 -0500
commit3c500a35544d6270b127bce7d4c5a15ef454b9e2 (patch)
tree9a2fc13181e2b871d8804f55fc717fdbe2387f62 /drivers/net
parent04b7fc4dec4fcb61dbe022bbaffda8ea37c39430 (diff)
NET: sa11x0-ir: split si->dev for IrDA transmit and receive buffers
The sa11x0-ir device is not the device which is doing the DMA; the DMA is being performed by a separate DMA engine.

Split the struct device associated with each DMA channel from the main struct device, but for the time being initialize it from the main struct device. This is another preparatory step to converting this driver to use the DMA engine API.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/irda/sa1100_ir.c18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index be67bdc0ca02..9c748f38b9d5 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -44,6 +44,7 @@ static int tx_lpm;
44static int max_rate = 4000000; 44static int max_rate = 4000000;
45 45
46struct sa1100_buf { 46struct sa1100_buf {
47 struct device *dev;
47 struct sk_buff *skb; 48 struct sk_buff *skb;
48 struct scatterlist sg; 49 struct scatterlist sg;
49 dma_regs_t *regs; 50 dma_regs_t *regs;
@@ -99,7 +100,7 @@ static int sa1100_irda_rx_alloc(struct sa1100_irda *si)
99 skb_reserve(si->dma_rx.skb, 1); 100 skb_reserve(si->dma_rx.skb, 1);
100 101
101 sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN); 102 sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN);
102 if (dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) { 103 if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) {
103 dev_kfree_skb_any(si->dma_rx.skb); 104 dev_kfree_skb_any(si->dma_rx.skb);
104 return -ENOMEM; 105 return -ENOMEM;
105 } 106 }
@@ -295,7 +296,7 @@ static void sa1100_irda_firtxdma_irq(void *id)
295 /* Account and free the packet. */ 296 /* Account and free the packet. */
296 skb = si->dma_tx.skb; 297 skb = si->dma_tx.skb;
297 if (skb) { 298 if (skb) {
298 dma_unmap_sg(si->dev, &si->dma_tx.sg, 1, 299 dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
299 DMA_TO_DEVICE); 300 DMA_TO_DEVICE);
300 dev->stats.tx_packets ++; 301 dev->stats.tx_packets ++;
301 dev->stats.tx_bytes += skb->len; 302 dev->stats.tx_bytes += skb->len;
@@ -317,7 +318,7 @@ static int sa1100_irda_fir_tx_start(struct sk_buff *skb, struct net_device *dev,
317 318
318 si->dma_tx.skb = skb; 319 si->dma_tx.skb = skb;
319 sg_set_buf(&si->dma_tx.sg, skb->data, skb->len); 320 sg_set_buf(&si->dma_tx.sg, skb->data, skb->len);
320 if (dma_map_sg(si->dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) { 321 if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
321 si->dma_tx.skb = NULL; 322 si->dma_tx.skb = NULL;
322 netif_wake_queue(dev); 323 netif_wake_queue(dev);
323 dev->stats.tx_dropped++; 324 dev->stats.tx_dropped++;
@@ -359,7 +360,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
359 len = dma_addr - sg_dma_address(&si->dma_rx.sg); 360 len = dma_addr - sg_dma_address(&si->dma_rx.sg);
360 if (len > HPSIR_MAX_RXLEN) 361 if (len > HPSIR_MAX_RXLEN)
361 len = HPSIR_MAX_RXLEN; 362 len = HPSIR_MAX_RXLEN;
362 dma_unmap_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); 363 dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
363 364
364 do { 365 do {
365 /* 366 /*
@@ -407,7 +408,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
407 * Remap the buffer - it was previously mapped, and we 408 * Remap the buffer - it was previously mapped, and we
408 * hope that this succeeds. 409 * hope that this succeeds.
409 */ 410 */
410 dma_map_sg(si->dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE); 411 dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
411 } 412 }
412} 413}
413 414
@@ -726,6 +727,9 @@ static int sa1100_irda_start(struct net_device *dev)
726 if (err) 727 if (err)
727 goto err_tx_dma; 728 goto err_tx_dma;
728 729
730 si->dma_rx.dev = si->dev;
731 si->dma_tx.dev = si->dev;
732
729 /* 733 /*
730 * Setup the serial port for the specified speed. 734 * Setup the serial port for the specified speed.
731 */ 735 */
@@ -783,7 +787,7 @@ static int sa1100_irda_stop(struct net_device *dev)
783 */ 787 */
784 skb = si->dma_rx.skb; 788 skb = si->dma_rx.skb;
785 if (skb) { 789 if (skb) {
786 dma_unmap_sg(si->dev, &si->dma_rx.sg, 1, 790 dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1,
787 DMA_FROM_DEVICE); 791 DMA_FROM_DEVICE);
788 dev_kfree_skb(skb); 792 dev_kfree_skb(skb);
789 si->dma_rx.skb = NULL; 793 si->dma_rx.skb = NULL;
@@ -791,7 +795,7 @@ static int sa1100_irda_stop(struct net_device *dev)
791 795
792 skb = si->dma_tx.skb; 796 skb = si->dma_tx.skb;
793 if (skb) { 797 if (skb) {
794 dma_unmap_sg(si->dev, &si->dma_tx.sg, 1, 798 dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
795 DMA_TO_DEVICE); 799 DMA_TO_DEVICE);
796 dev_kfree_skb(skb); 800 dev_kfree_skb(skb);
797 si->dma_tx.skb = NULL; 801 si->dma_tx.skb = NULL;