aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorRichard Röjfors <richard.rojfors@pelagicore.com>2010-07-27 08:57:01 -0400
committerDavid S. Miller <davem@davemloft.net>2010-07-27 23:48:19 -0400
commit94fe8c683cea97fe2c59a5f0dc206aa329c5763c (patch)
tree79349dba5fc518f8795cfea4c1b70217f36d2c6f /drivers
parent3eeb29972b1139f733f7269def527900729f4cc7 (diff)
ks8842: Support DMA when accessed via timberdale
This patch adds support for RX and TX DMA via the DMA API; this is only supported when the KS8842 is accessed via timberdale. There is no support for DMA on the generic bus interface itself; a state machine inside the FPGA handles RX and TX transfers to/from buffers in the FPGA. The host CPU can do DMA to and from these buffers. The FPGA has to handle the RX interrupts, so these must be enabled in the ks8842 but not in the FPGA. The driver must not disable the RX interrupt; that would mean that the data transfers into the FPGA buffers would stop. The host shall not enable TX interrupts since TX is handled by the FPGA; the host is notified by DMA callbacks when transfers are finished. Which DMA channels to use is specified via parameters in the platform data struct. Signed-off-by: Richard Röjfors <richard.rojfors@pelagicore.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ks8842.c464
1 files changed, 443 insertions, 21 deletions
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 289b0bee346..3fe38c787f2 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -30,6 +30,9 @@
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/ethtool.h> 31#include <linux/ethtool.h>
32#include <linux/ks8842.h> 32#include <linux/ks8842.h>
33#include <linux/dmaengine.h>
34#include <linux/dma-mapping.h>
35#include <linux/scatterlist.h>
33 36
34#define DRV_NAME "ks8842" 37#define DRV_NAME "ks8842"
35 38
@@ -82,6 +85,15 @@
82#define IRQ_RX_ERROR 0x0080 85#define IRQ_RX_ERROR 0x0080
83#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ 86#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
84 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) 87 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
88/* When running via timberdale in DMA mode, the RX interrupt should be
89 enabled in the KS8842, but not in the FPGA IP, since the IP handles
90 RX DMA internally.
91 TX interrupts are not needed; TX is handled by the FPGA and the driver is
92 notified via DMA callbacks.
93*/
94#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
95 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
96#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX)
85#define REG_ISR 0x02 97#define REG_ISR 0x02
86#define REG_RXSR 0x04 98#define REG_RXSR 0x04
87#define RXSR_VALID 0x8000 99#define RXSR_VALID 0x8000
@@ -124,6 +136,28 @@
124#define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ 136#define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */
125#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ 137#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */
126 138
139#define DMA_BUFFER_SIZE 2048
140
141struct ks8842_tx_dma_ctl {
142 struct dma_chan *chan;
143 struct dma_async_tx_descriptor *adesc;
144 void *buf;
145 struct scatterlist sg;
146 int channel;
147};
148
149struct ks8842_rx_dma_ctl {
150 struct dma_chan *chan;
151 struct dma_async_tx_descriptor *adesc;
152 struct sk_buff *skb;
153 struct scatterlist sg;
154 struct tasklet_struct tasklet;
155 int channel;
156};
157
158#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
159 ((adapter)->dma_rx.channel != -1))
160
127struct ks8842_adapter { 161struct ks8842_adapter {
128 void __iomem *hw_addr; 162 void __iomem *hw_addr;
129 int irq; 163 int irq;
@@ -132,8 +166,19 @@ struct ks8842_adapter {
132 spinlock_t lock; /* spinlock to be interrupt safe */ 166 spinlock_t lock; /* spinlock to be interrupt safe */
133 struct work_struct timeout_work; 167 struct work_struct timeout_work;
134 struct net_device *netdev; 168 struct net_device *netdev;
169 struct device *dev;
170 struct ks8842_tx_dma_ctl dma_tx;
171 struct ks8842_rx_dma_ctl dma_rx;
135}; 172};
136 173
174static void ks8842_dma_rx_cb(void *data);
175static void ks8842_dma_tx_cb(void *data);
176
177static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
178{
179 iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
180}
181
137static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) 182static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
138{ 183{
139 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); 184 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
@@ -297,8 +342,19 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
297 ks8842_write16(adapter, 18, 0xffff, REG_ISR); 342 ks8842_write16(adapter, 18, 0xffff, REG_ISR);
298 343
299 /* enable interrupts */ 344 /* enable interrupts */
300 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 345 if (KS8842_USE_DMA(adapter)) {
301 346 /* When running in DMA Mode the RX interrupt is not enabled in
347 timberdale because RX data is received by DMA callbacks;
348 it must still be enabled in the KS8842 because it indicates
349 to timberdale when there is RX data for its DMA FIFOs */
350 iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
351 ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
352 } else {
353 if (!(adapter->conf_flags & MICREL_KS884X))
354 iowrite16(ENABLED_IRQS,
355 adapter->hw_addr + REG_TIMB_IER);
356 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
357 }
302 /* enable the switch */ 358 /* enable the switch */
303 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); 359 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
304} 360}
@@ -371,6 +427,53 @@ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
371 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; 427 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
372} 428}
373 429
430static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
431{
432 struct ks8842_adapter *adapter = netdev_priv(netdev);
433 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
434 u8 *buf = ctl->buf;
435
436 if (ctl->adesc) {
437 netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
438 /* transfer ongoing */
439 return NETDEV_TX_BUSY;
440 }
441
442 sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
443
444 /* copy data to the TX buffer */
445 /* the control word, enable IRQ, port 1 and the length */
446 *buf++ = 0x00;
447 *buf++ = 0x01; /* Port 1 */
448 *buf++ = skb->len & 0xff;
449 *buf++ = (skb->len >> 8) & 0xff;
450 skb_copy_from_linear_data(skb, buf, skb->len);
451
452 dma_sync_single_range_for_device(adapter->dev,
453 sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
454 DMA_TO_DEVICE);
455
456 /* make sure the length is a multiple of 4 */
457 if (sg_dma_len(&ctl->sg) % 4)
458 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
459
460 ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
461 &ctl->sg, 1, DMA_TO_DEVICE,
462 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
463 if (!ctl->adesc)
464 return NETDEV_TX_BUSY;
465
466 ctl->adesc->callback_param = netdev;
467 ctl->adesc->callback = ks8842_dma_tx_cb;
468 ctl->adesc->tx_submit(ctl->adesc);
469
470 netdev->stats.tx_bytes += skb->len;
471
472 dev_kfree_skb(skb);
473
474 return NETDEV_TX_OK;
475}
476
374static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) 477static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
375{ 478{
376 struct ks8842_adapter *adapter = netdev_priv(netdev); 479 struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -422,6 +525,121 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
422 return NETDEV_TX_OK; 525 return NETDEV_TX_OK;
423} 526}
424 527
528static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
529{
530 netdev_dbg(netdev, "RX error, status: %x\n", status);
531
532 netdev->stats.rx_errors++;
533 if (status & RXSR_TOO_LONG)
534 netdev->stats.rx_length_errors++;
535 if (status & RXSR_CRC_ERROR)
536 netdev->stats.rx_crc_errors++;
537 if (status & RXSR_RUNT)
538 netdev->stats.rx_frame_errors++;
539}
540
541static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
542 int len)
543{
544 netdev_dbg(netdev, "RX packet, len: %d\n", len);
545
546 netdev->stats.rx_packets++;
547 netdev->stats.rx_bytes += len;
548 if (status & RXSR_MULTICAST)
549 netdev->stats.multicast++;
550}
551
552static int __ks8842_start_new_rx_dma(struct net_device *netdev)
553{
554 struct ks8842_adapter *adapter = netdev_priv(netdev);
555 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
556 struct scatterlist *sg = &ctl->sg;
557 int err;
558
559 ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
560 if (ctl->skb) {
561 sg_init_table(sg, 1);
562 sg_dma_address(sg) = dma_map_single(adapter->dev,
563 ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
564 err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
565 if (unlikely(err)) {
566 sg_dma_address(sg) = 0;
567 goto out;
568 }
569
570 sg_dma_len(sg) = DMA_BUFFER_SIZE;
571
572 ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
573 sg, 1, DMA_FROM_DEVICE,
574 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
575
576 if (!ctl->adesc)
577 goto out;
578
579 ctl->adesc->callback_param = netdev;
580 ctl->adesc->callback = ks8842_dma_rx_cb;
581 ctl->adesc->tx_submit(ctl->adesc);
582 } else {
583 err = -ENOMEM;
584 sg_dma_address(sg) = 0;
585 goto out;
586 }
587
588 return err;
589out:
590 if (sg_dma_address(sg))
591 dma_unmap_single(adapter->dev, sg_dma_address(sg),
592 DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
593 sg_dma_address(sg) = 0;
594 if (ctl->skb)
595 dev_kfree_skb(ctl->skb);
596
597 ctl->skb = NULL;
598
599 printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
600 return err;
601}
602
603static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
604{
605 struct net_device *netdev = (struct net_device *)arg;
606 struct ks8842_adapter *adapter = netdev_priv(netdev);
607 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
608 struct sk_buff *skb = ctl->skb;
609 dma_addr_t addr = sg_dma_address(&ctl->sg);
610 u32 status;
611
612 ctl->adesc = NULL;
613
614 /* kick next transfer going */
615 __ks8842_start_new_rx_dma(netdev);
616
617 /* now handle the data we got */
618 dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
619
620 status = *((u32 *)skb->data);
621
622 netdev_dbg(netdev, "%s - rx_data: status: %x\n",
623 __func__, status & 0xffff);
624
625 /* check the status */
626 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
627 int len = (status >> 16) & 0x7ff;
628
629 ks8842_update_rx_counters(netdev, status, len);
630
631 /* reserve 4 bytes which is the status word */
632 skb_reserve(skb, 4);
633 skb_put(skb, len);
634
635 skb->protocol = eth_type_trans(skb, netdev);
636 netif_rx(skb);
637 } else {
638 ks8842_update_rx_err_counters(netdev, status);
639 dev_kfree_skb(skb);
640 }
641}
642
425static void ks8842_rx_frame(struct net_device *netdev, 643static void ks8842_rx_frame(struct net_device *netdev,
426 struct ks8842_adapter *adapter) 644 struct ks8842_adapter *adapter)
427{ 645{
@@ -445,13 +663,9 @@ static void ks8842_rx_frame(struct net_device *netdev,
445 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 663 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
446 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); 664 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
447 665
448 netdev_dbg(netdev, "%s, got package, len: %d\n", __func__, len);
449 if (skb) { 666 if (skb) {
450 667
451 netdev->stats.rx_packets++; 668 ks8842_update_rx_counters(netdev, status, len);
452 netdev->stats.rx_bytes += len;
453 if (status & RXSR_MULTICAST)
454 netdev->stats.multicast++;
455 669
456 if (adapter->conf_flags & KS884X_16BIT) { 670 if (adapter->conf_flags & KS884X_16BIT) {
457 u16 *data16 = (u16 *)skb_put(skb, len); 671 u16 *data16 = (u16 *)skb_put(skb, len);
@@ -477,16 +691,8 @@ static void ks8842_rx_frame(struct net_device *netdev,
477 netif_rx(skb); 691 netif_rx(skb);
478 } else 692 } else
479 netdev->stats.rx_dropped++; 693 netdev->stats.rx_dropped++;
480 } else { 694 } else
481 netdev_dbg(netdev, "RX error, status: %x\n", status); 695 ks8842_update_rx_err_counters(netdev, status);
482 netdev->stats.rx_errors++;
483 if (status & RXSR_TOO_LONG)
484 netdev->stats.rx_length_errors++;
485 if (status & RXSR_CRC_ERROR)
486 netdev->stats.rx_crc_errors++;
487 if (status & RXSR_RUNT)
488 netdev->stats.rx_frame_errors++;
489 }
490 696
491 /* set high watermark to 3K */ 697 /* set high watermark to 3K */
492 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); 698 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
@@ -541,6 +747,12 @@ void ks8842_tasklet(unsigned long arg)
541 isr = ks8842_read16(adapter, 18, REG_ISR); 747 isr = ks8842_read16(adapter, 18, REG_ISR);
542 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); 748 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
543 749
750 /* when running in DMA mode, do not ack RX interrupts, it is handled
751 internally by timberdale, otherwise its DMA FIFOs would stop
752 */
753 if (KS8842_USE_DMA(adapter))
754 isr &= ~IRQ_RX;
755
544 /* Ack */ 756 /* Ack */
545 ks8842_write16(adapter, 18, isr, REG_ISR); 757 ks8842_write16(adapter, 18, isr, REG_ISR);
546 758
@@ -554,9 +766,11 @@ void ks8842_tasklet(unsigned long arg)
554 if (isr & IRQ_LINK_CHANGE) 766 if (isr & IRQ_LINK_CHANGE)
555 ks8842_update_link_status(netdev, adapter); 767 ks8842_update_link_status(netdev, adapter);
556 768
557 if (isr & (IRQ_RX | IRQ_RX_ERROR)) 769 /* should not get IRQ_RX when running DMA mode */
770 if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
558 ks8842_handle_rx(netdev, adapter); 771 ks8842_handle_rx(netdev, adapter);
559 772
773 /* should only happen when in PIO mode */
560 if (isr & IRQ_TX) 774 if (isr & IRQ_TX)
561 ks8842_handle_tx(netdev, adapter); 775 ks8842_handle_tx(netdev, adapter);
562 776
@@ -575,8 +789,17 @@ void ks8842_tasklet(unsigned long arg)
575 789
576 /* re-enable interrupts, put back the bank selection register */ 790 /* re-enable interrupts, put back the bank selection register */
577 spin_lock_irqsave(&adapter->lock, flags); 791 spin_lock_irqsave(&adapter->lock, flags);
578 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 792 if (KS8842_USE_DMA(adapter))
793 ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
794 else
795 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
579 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 796 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
797
798 /* Make sure timberdale continues DMA operations, they are stopped while
799 we are handling the ks8842 because we might change bank */
800 if (KS8842_USE_DMA(adapter))
801 ks8842_resume_dma(adapter);
802
580 spin_unlock_irqrestore(&adapter->lock, flags); 803 spin_unlock_irqrestore(&adapter->lock, flags);
581} 804}
582 805
@@ -592,8 +815,12 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
592 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); 815 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
593 816
594 if (isr) { 817 if (isr) {
595 /* disable IRQ */ 818 if (KS8842_USE_DMA(adapter))
596 ks8842_write16(adapter, 18, 0x00, REG_IER); 819 /* disable all but RX IRQ, since the FPGA relies on it*/
820 ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
821 else
822 /* disable IRQ */
823 ks8842_write16(adapter, 18, 0x00, REG_IER);
597 824
598 /* schedule tasklet */ 825 /* schedule tasklet */
599 tasklet_schedule(&adapter->tasklet); 826 tasklet_schedule(&adapter->tasklet);
@@ -603,9 +830,151 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
603 830
604 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 831 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
605 832
833 /* After an interrupt, tell timberdale to continue DMA operations.
834 DMA is disabled while we are handling the ks8842 because we might
835 change bank */
836 ks8842_resume_dma(adapter);
837
606 return ret; 838 return ret;
607} 839}
608 840
841static void ks8842_dma_rx_cb(void *data)
842{
843 struct net_device *netdev = data;
844 struct ks8842_adapter *adapter = netdev_priv(netdev);
845
846 netdev_dbg(netdev, "RX DMA finished\n");
847 /* schedule tasklet */
848 if (adapter->dma_rx.adesc)
849 tasklet_schedule(&adapter->dma_rx.tasklet);
850}
851
852static void ks8842_dma_tx_cb(void *data)
853{
854 struct net_device *netdev = data;
855 struct ks8842_adapter *adapter = netdev_priv(netdev);
856 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
857
858 netdev_dbg(netdev, "TX DMA finished\n");
859
860 if (!ctl->adesc)
861 return;
862
863 netdev->stats.tx_packets++;
864 ctl->adesc = NULL;
865
866 if (netif_queue_stopped(netdev))
867 netif_wake_queue(netdev);
868}
869
870static void ks8842_stop_dma(struct ks8842_adapter *adapter)
871{
872 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
873 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
874
875 tx_ctl->adesc = NULL;
876 if (tx_ctl->chan)
877 tx_ctl->chan->device->device_control(tx_ctl->chan,
878 DMA_TERMINATE_ALL, 0);
879
880 rx_ctl->adesc = NULL;
881 if (rx_ctl->chan)
882 rx_ctl->chan->device->device_control(rx_ctl->chan,
883 DMA_TERMINATE_ALL, 0);
884
885 if (sg_dma_address(&rx_ctl->sg))
886 dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
887 DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
888 sg_dma_address(&rx_ctl->sg) = 0;
889
890 dev_kfree_skb(rx_ctl->skb);
891 rx_ctl->skb = NULL;
892}
893
894static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
895{
896 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
897 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
898
899 ks8842_stop_dma(adapter);
900
901 if (tx_ctl->chan)
902 dma_release_channel(tx_ctl->chan);
903 tx_ctl->chan = NULL;
904
905 if (rx_ctl->chan)
906 dma_release_channel(rx_ctl->chan);
907 rx_ctl->chan = NULL;
908
909 tasklet_kill(&rx_ctl->tasklet);
910
911 if (sg_dma_address(&tx_ctl->sg))
912 dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
913 DMA_BUFFER_SIZE, DMA_TO_DEVICE);
914 sg_dma_address(&tx_ctl->sg) = 0;
915
916 kfree(tx_ctl->buf);
917 tx_ctl->buf = NULL;
918}
919
920static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
921{
922 return chan->chan_id == (int)filter_param;
923}
924
925static int ks8842_alloc_dma_bufs(struct net_device *netdev)
926{
927 struct ks8842_adapter *adapter = netdev_priv(netdev);
928 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
929 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
930 int err;
931
932 dma_cap_mask_t mask;
933
934 dma_cap_zero(mask);
935 dma_cap_set(DMA_SLAVE, mask);
936 dma_cap_set(DMA_PRIVATE, mask);
937
938 sg_init_table(&tx_ctl->sg, 1);
939
940 tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
941 (void *)tx_ctl->channel);
942 if (!tx_ctl->chan) {
943 err = -ENODEV;
944 goto err;
945 }
946
947 /* allocate DMA buffer */
948 tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
949 if (!tx_ctl->buf) {
950 err = -ENOMEM;
951 goto err;
952 }
953
954 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
955 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
956 err = dma_mapping_error(adapter->dev,
957 sg_dma_address(&tx_ctl->sg));
958 if (err) {
959 sg_dma_address(&tx_ctl->sg) = 0;
960 goto err;
961 }
962
963 rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
964 (void *)rx_ctl->channel);
965 if (!rx_ctl->chan) {
966 err = -ENODEV;
967 goto err;
968 }
969
970 tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
971 (unsigned long)netdev);
972
973 return 0;
974err:
975 ks8842_dealloc_dma_bufs(adapter);
976 return err;
977}
609 978
610/* Netdevice operations */ 979/* Netdevice operations */
611 980
@@ -616,6 +985,25 @@ static int ks8842_open(struct net_device *netdev)
616 985
617 netdev_dbg(netdev, "%s - entry\n", __func__); 986 netdev_dbg(netdev, "%s - entry\n", __func__);
618 987
988 if (KS8842_USE_DMA(adapter)) {
989 err = ks8842_alloc_dma_bufs(netdev);
990
991 if (!err) {
992 /* start RX dma */
993 err = __ks8842_start_new_rx_dma(netdev);
994 if (err)
995 ks8842_dealloc_dma_bufs(adapter);
996 }
997
998 if (err) {
999 printk(KERN_WARNING DRV_NAME
1000 ": Failed to initiate DMA, running PIO\n");
1001 ks8842_dealloc_dma_bufs(adapter);
1002 adapter->dma_rx.channel = -1;
1003 adapter->dma_tx.channel = -1;
1004 }
1005 }
1006
619 /* reset the HW */ 1007 /* reset the HW */
620 ks8842_reset_hw(adapter); 1008 ks8842_reset_hw(adapter);
621 1009
@@ -641,6 +1029,9 @@ static int ks8842_close(struct net_device *netdev)
641 1029
642 cancel_work_sync(&adapter->timeout_work); 1030 cancel_work_sync(&adapter->timeout_work);
643 1031
1032 if (KS8842_USE_DMA(adapter))
1033 ks8842_dealloc_dma_bufs(adapter);
1034
644 /* free the irq */ 1035 /* free the irq */
645 free_irq(adapter->irq, netdev); 1036 free_irq(adapter->irq, netdev);
646 1037
@@ -658,6 +1049,17 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
658 1049
659 netdev_dbg(netdev, "%s: entry\n", __func__); 1050 netdev_dbg(netdev, "%s: entry\n", __func__);
660 1051
1052 if (KS8842_USE_DMA(adapter)) {
1053 unsigned long flags;
1054 ret = ks8842_tx_frame_dma(skb, netdev);
1055 /* for now only allow one transfer at the time */
1056 spin_lock_irqsave(&adapter->lock, flags);
1057 if (adapter->dma_tx.adesc)
1058 netif_stop_queue(netdev);
1059 spin_unlock_irqrestore(&adapter->lock, flags);
1060 return ret;
1061 }
1062
661 ret = ks8842_tx_frame(skb, netdev); 1063 ret = ks8842_tx_frame(skb, netdev);
662 1064
663 if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) 1065 if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8)
@@ -693,6 +1095,10 @@ static void ks8842_tx_timeout_work(struct work_struct *work)
693 netdev_dbg(netdev, "%s: entry\n", __func__); 1095 netdev_dbg(netdev, "%s: entry\n", __func__);
694 1096
695 spin_lock_irqsave(&adapter->lock, flags); 1097 spin_lock_irqsave(&adapter->lock, flags);
1098
1099 if (KS8842_USE_DMA(adapter))
1100 ks8842_stop_dma(adapter);
1101
696 /* disable interrupts */ 1102 /* disable interrupts */
697 ks8842_write16(adapter, 18, 0, REG_IER); 1103 ks8842_write16(adapter, 18, 0, REG_IER);
698 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); 1104 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
@@ -706,6 +1112,9 @@ static void ks8842_tx_timeout_work(struct work_struct *work)
706 ks8842_write_mac_addr(adapter, netdev->dev_addr); 1112 ks8842_write_mac_addr(adapter, netdev->dev_addr);
707 1113
708 ks8842_update_link_status(netdev, adapter); 1114 ks8842_update_link_status(netdev, adapter);
1115
1116 if (KS8842_USE_DMA(adapter))
1117 __ks8842_start_new_rx_dma(netdev);
709} 1118}
710 1119
711static void ks8842_tx_timeout(struct net_device *netdev) 1120static void ks8842_tx_timeout(struct net_device *netdev)
@@ -765,6 +1174,19 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
765 goto err_get_irq; 1174 goto err_get_irq;
766 } 1175 }
767 1176
1177 adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;
1178
1179 /* DMA is only supported when accessed via timberdale */
1180 if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
1181 (pdata->tx_dma_channel != -1) &&
1182 (pdata->rx_dma_channel != -1)) {
1183 adapter->dma_rx.channel = pdata->rx_dma_channel;
1184 adapter->dma_tx.channel = pdata->tx_dma_channel;
1185 } else {
1186 adapter->dma_rx.channel = -1;
1187 adapter->dma_tx.channel = -1;
1188 }
1189
768 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); 1190 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
769 spin_lock_init(&adapter->lock); 1191 spin_lock_init(&adapter->lock);
770 1192