Diffstat (limited to 'drivers/net/ks8842.c')
-rw-r--r--  drivers/net/ks8842.c | 706
1 file changed, 614 insertions(+), 92 deletions(-)
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index f852ab3ae9cf..928b2b83cef5 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -18,6 +18,7 @@
18 18
19/* Supports: 19/* Supports:
20 * The Micrel KS8842 behind the timberdale FPGA 20 * The Micrel KS8842 behind the timberdale FPGA
21 * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface
21 */ 22 */
22 23
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -29,11 +30,19 @@
29#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
30#include <linux/ethtool.h> 31#include <linux/ethtool.h>
31#include <linux/ks8842.h> 32#include <linux/ks8842.h>
33#include <linux/dmaengine.h>
34#include <linux/dma-mapping.h>
35#include <linux/scatterlist.h>
32 36
33#define DRV_NAME "ks8842" 37#define DRV_NAME "ks8842"
34 38
35/* Timberdale specific Registers */ 39/* Timberdale specific Registers */
36#define REG_TIMB_RST 0x1c 40#define REG_TIMB_RST 0x1c
41#define REG_TIMB_FIFO 0x20
42#define REG_TIMB_ISR 0x24
43#define REG_TIMB_IER 0x28
44#define REG_TIMB_IAR 0x2C
45#define REQ_TIMB_DMA_RESUME 0x30
37 46
38/* KS8842 registers */ 47/* KS8842 registers */
39 48
@@ -76,6 +85,15 @@
76#define IRQ_RX_ERROR 0x0080 85#define IRQ_RX_ERROR 0x0080
77#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ 86#define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \
78 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) 87 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
88/* When running via timberdale in DMA mode, the RX interrupt should be
89 enabled in the KS8842, but not in the FPGA IP, since the IP handles
90 RX DMA internally.
 91   TX interrupts are not needed; TX completion is handled by the FPGA
 92   and the driver is notified via DMA callbacks.
93*/
94#define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \
95 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR)
96#define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX)
79#define REG_ISR 0x02 97#define REG_ISR 0x02
80#define REG_RXSR 0x04 98#define REG_RXSR 0x04
81#define RXSR_VALID 0x8000 99#define RXSR_VALID 0x8000
@@ -114,14 +132,53 @@
114#define REG_P1CR4 0x02 132#define REG_P1CR4 0x02
115#define REG_P1SR 0x04 133#define REG_P1SR 0x04
116 134
135/* flags passed by platform_device for configuration */
 136#define MICREL_KS884X		0x01	/* 0=Timberdale(FPGA), 1=Micrel */
137#define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */
138
139#define DMA_BUFFER_SIZE 2048
140
141struct ks8842_tx_dma_ctl {
142 struct dma_chan *chan;
143 struct dma_async_tx_descriptor *adesc;
144 void *buf;
145 struct scatterlist sg;
146 int channel;
147};
148
149struct ks8842_rx_dma_ctl {
150 struct dma_chan *chan;
151 struct dma_async_tx_descriptor *adesc;
152 struct sk_buff *skb;
153 struct scatterlist sg;
154 struct tasklet_struct tasklet;
155 int channel;
156};
157
158#define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
159 ((adapter)->dma_rx.channel != -1))
160
117struct ks8842_adapter { 161struct ks8842_adapter {
118 void __iomem *hw_addr; 162 void __iomem *hw_addr;
119 int irq; 163 int irq;
164 unsigned long conf_flags; /* copy of platform_device config */
120 struct tasklet_struct tasklet; 165 struct tasklet_struct tasklet;
121 spinlock_t lock; /* spinlock to be interrupt safe */ 166 spinlock_t lock; /* spinlock to be interrupt safe */
122 struct platform_device *pdev; 167 struct work_struct timeout_work;
168 struct net_device *netdev;
169 struct device *dev;
170 struct ks8842_tx_dma_ctl dma_tx;
171 struct ks8842_rx_dma_ctl dma_rx;
123}; 172};
124 173
174static void ks8842_dma_rx_cb(void *data);
175static void ks8842_dma_tx_cb(void *data);
176
177static inline void ks8842_resume_dma(struct ks8842_adapter *adapter)
178{
179 iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME);
180}
181
125static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) 182static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank)
126{ 183{
127 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); 184 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK);
@@ -191,16 +248,21 @@ static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank,
191 248
192static void ks8842_reset(struct ks8842_adapter *adapter) 249static void ks8842_reset(struct ks8842_adapter *adapter)
193{ 250{
194 /* The KS8842 goes haywire when doing softare reset 251 if (adapter->conf_flags & MICREL_KS884X) {
195 * a work around in the timberdale IP is implemented to 252 ks8842_write16(adapter, 3, 1, REG_GRR);
196 * do a hardware reset instead 253 msleep(10);
197 ks8842_write16(adapter, 3, 1, REG_GRR); 254 iowrite16(0, adapter->hw_addr + REG_GRR);
198 msleep(10); 255 } else {
 199	iowrite16(0, adapter->hw_addr + REG_GRR); 256		/* The KS8842 goes haywire when doing software reset
 200	*/ 257		 * a workaround in the timberdale IP is implemented to
201 iowrite16(32, adapter->hw_addr + REG_SELECT_BANK); 258 * do a hardware reset instead
202 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); 259 ks8842_write16(adapter, 3, 1, REG_GRR);
203 msleep(20); 260 msleep(10);
261 iowrite16(0, adapter->hw_addr + REG_GRR);
262 */
263 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST);
264 msleep(20);
265 }
204} 266}
205 267
206static void ks8842_update_link_status(struct net_device *netdev, 268static void ks8842_update_link_status(struct net_device *netdev,
@@ -269,8 +331,6 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
269 331
270 /* restart port auto-negotiation */ 332 /* restart port auto-negotiation */
271 ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); 333 ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4);
272 /* only advertise 10Mbps */
273 ks8842_clear_bits(adapter, 49, 3 << 2, REG_P1CR4);
274 334
275 /* Enable the transmitter */ 335 /* Enable the transmitter */
276 ks8842_enable_tx(adapter); 336 ks8842_enable_tx(adapter);
@@ -282,8 +342,19 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
282 ks8842_write16(adapter, 18, 0xffff, REG_ISR); 342 ks8842_write16(adapter, 18, 0xffff, REG_ISR);
283 343
284 /* enable interrupts */ 344 /* enable interrupts */
285 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 345 if (KS8842_USE_DMA(adapter)) {
286 346 /* When running in DMA Mode the RX interrupt is not enabled in
 347		   timberdale, because RX data is received via DMA callbacks.
 348		   It must still be enabled in the KS8842, because it indicates
 349		   to timberdale when there is RX data for its DMA FIFOs */
350 iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER);
351 ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
352 } else {
353 if (!(adapter->conf_flags & MICREL_KS884X))
354 iowrite16(ENABLED_IRQS,
355 adapter->hw_addr + REG_TIMB_IER);
356 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
357 }
287 /* enable the switch */ 358 /* enable the switch */
288 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); 359 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
289} 360}
@@ -296,13 +367,28 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
296 for (i = 0; i < ETH_ALEN; i++) 367 for (i = 0; i < ETH_ALEN; i++)
297 dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); 368 dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
298 369
299 /* make sure the switch port uses the same MAC as the QMU */ 370 if (adapter->conf_flags & MICREL_KS884X) {
300 mac = ks8842_read16(adapter, 2, REG_MARL); 371 /*
301 ks8842_write16(adapter, 39, mac, REG_MACAR1); 372 the sequence of saving mac addr between MAC and Switch is
302 mac = ks8842_read16(adapter, 2, REG_MARM); 373 different.
303 ks8842_write16(adapter, 39, mac, REG_MACAR2); 374 */
304 mac = ks8842_read16(adapter, 2, REG_MARH); 375
305 ks8842_write16(adapter, 39, mac, REG_MACAR3); 376 mac = ks8842_read16(adapter, 2, REG_MARL);
377 ks8842_write16(adapter, 39, mac, REG_MACAR3);
378 mac = ks8842_read16(adapter, 2, REG_MARM);
379 ks8842_write16(adapter, 39, mac, REG_MACAR2);
380 mac = ks8842_read16(adapter, 2, REG_MARH);
381 ks8842_write16(adapter, 39, mac, REG_MACAR1);
382 } else {
383
384 /* make sure the switch port uses the same MAC as the QMU */
385 mac = ks8842_read16(adapter, 2, REG_MARL);
386 ks8842_write16(adapter, 39, mac, REG_MACAR1);
387 mac = ks8842_read16(adapter, 2, REG_MARM);
388 ks8842_write16(adapter, 39, mac, REG_MACAR2);
389 mac = ks8842_read16(adapter, 2, REG_MARH);
390 ks8842_write16(adapter, 39, mac, REG_MACAR3);
391 }
306} 392}
307 393
308static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) 394static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
@@ -313,8 +399,25 @@ static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
313 spin_lock_irqsave(&adapter->lock, flags); 399 spin_lock_irqsave(&adapter->lock, flags);
314 for (i = 0; i < ETH_ALEN; i++) { 400 for (i = 0; i < ETH_ALEN; i++) {
315 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); 401 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
316 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], 402 if (!(adapter->conf_flags & MICREL_KS884X))
317 REG_MACAR1 + i); 403 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
404 REG_MACAR1 + i);
405 }
406
407 if (adapter->conf_flags & MICREL_KS884X) {
408 /*
 409		the MAC address is mirrored into the switch registers
 410		in the reverse order.
411 */
412
413 u16 mac;
414
415 mac = ks8842_read16(adapter, 2, REG_MARL);
416 ks8842_write16(adapter, 39, mac, REG_MACAR3);
417 mac = ks8842_read16(adapter, 2, REG_MARM);
418 ks8842_write16(adapter, 39, mac, REG_MACAR2);
419 mac = ks8842_read16(adapter, 2, REG_MARH);
420 ks8842_write16(adapter, 39, mac, REG_MACAR1);
318 } 421 }
319 spin_unlock_irqrestore(&adapter->lock, flags); 422 spin_unlock_irqrestore(&adapter->lock, flags);
320} 423}
@@ -324,15 +427,59 @@ static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
324 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; 427 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
325} 428}
326 429
430static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
431{
432 struct ks8842_adapter *adapter = netdev_priv(netdev);
433 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
434 u8 *buf = ctl->buf;
435
436 if (ctl->adesc) {
437 netdev_dbg(netdev, "%s: TX ongoing\n", __func__);
438 /* transfer ongoing */
439 return NETDEV_TX_BUSY;
440 }
441
442 sg_dma_len(&ctl->sg) = skb->len + sizeof(u32);
443
444 /* copy data to the TX buffer */
445 /* the control word, enable IRQ, port 1 and the length */
446 *buf++ = 0x00;
447 *buf++ = 0x01; /* Port 1 */
448 *buf++ = skb->len & 0xff;
449 *buf++ = (skb->len >> 8) & 0xff;
450 skb_copy_from_linear_data(skb, buf, skb->len);
451
452 dma_sync_single_range_for_device(adapter->dev,
453 sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg),
454 DMA_TO_DEVICE);
455
456 /* make sure the length is a multiple of 4 */
457 if (sg_dma_len(&ctl->sg) % 4)
458 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
459
460 ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
461 &ctl->sg, 1, DMA_TO_DEVICE,
462 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
463 if (!ctl->adesc)
464 return NETDEV_TX_BUSY;
465
466 ctl->adesc->callback_param = netdev;
467 ctl->adesc->callback = ks8842_dma_tx_cb;
468 ctl->adesc->tx_submit(ctl->adesc);
469
470 netdev->stats.tx_bytes += skb->len;
471
472 dev_kfree_skb(skb);
473
474 return NETDEV_TX_OK;
475}
476
327static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) 477static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
328{ 478{
329 struct ks8842_adapter *adapter = netdev_priv(netdev); 479 struct ks8842_adapter *adapter = netdev_priv(netdev);
330 int len = skb->len; 480 int len = skb->len;
331 u32 *ptr = (u32 *)skb->data;
332 u32 ctrl;
333 481
334 dev_dbg(&adapter->pdev->dev, 482 netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n",
335 "%s: len %u head %p data %p tail %p end %p\n",
336 __func__, skb->len, skb->head, skb->data, 483 __func__, skb->len, skb->head, skb->data,
337 skb_tail_pointer(skb), skb_end_pointer(skb)); 484 skb_tail_pointer(skb), skb_end_pointer(skb));
338 485
@@ -340,17 +487,34 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
340 if (ks8842_tx_fifo_space(adapter) < len + 8) 487 if (ks8842_tx_fifo_space(adapter) < len + 8)
341 return NETDEV_TX_BUSY; 488 return NETDEV_TX_BUSY;
342 489
343 /* the control word, enable IRQ, port 1 and the length */ 490 if (adapter->conf_flags & KS884X_16BIT) {
344 ctrl = 0x8000 | 0x100 | (len << 16); 491 u16 *ptr16 = (u16 *)skb->data;
345 ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); 492 ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO);
493 ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI);
494 netdev->stats.tx_bytes += len;
495
496 /* copy buffer */
497 while (len > 0) {
498 iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO);
499 iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI);
500 len -= sizeof(u32);
501 }
502 } else {
346 503
347 netdev->stats.tx_bytes += len; 504 u32 *ptr = (u32 *)skb->data;
505 u32 ctrl;
506 /* the control word, enable IRQ, port 1 and the length */
507 ctrl = 0x8000 | 0x100 | (len << 16);
508 ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);
348 509
349 /* copy buffer */ 510 netdev->stats.tx_bytes += len;
350 while (len > 0) { 511
351 iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); 512 /* copy buffer */
352 len -= sizeof(u32); 513 while (len > 0) {
353 ptr++; 514 iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
515 len -= sizeof(u32);
516 ptr++;
517 }
354 } 518 }
355 519
356 /* enqueue packet */ 520 /* enqueue packet */
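[Editor's note] The DMA transmit path added above follows the dmaengine slave API of this kernel generation: the frame is copied into a pre-mapped bounce buffer described by a one-entry scatterlist, a descriptor is requested from the channel driver, a completion callback is attached, and the descriptor is submitted. A minimal sketch of that pattern follows; tx_submit_one() and my_tx_done() are illustrative names, not part of the driver.

/*
 * Illustrative sketch of the dmaengine slave-TX pattern used by
 * ks8842_tx_frame_dma() above (old dmaengine API of this era).
 * tx_submit_one() and my_tx_done() are hypothetical names.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/scatterlist.h>

static void my_tx_done(void *param)
{
	/* completion callback: the controller finished copying the frame */
	struct net_device *netdev = param;

	netif_wake_queue(netdev);
}

static int tx_submit_one(struct dma_chan *chan, struct scatterlist *sg,
			 struct net_device *netdev)
{
	struct dma_async_tx_descriptor *desc;

	/* ask the channel driver to build a descriptor for one sg entry */
	desc = chan->device->device_prep_slave_sg(chan, sg, 1,
			DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = my_tx_done;
	desc->callback_param = netdev;
	desc->tx_submit(desc);	/* queue it; some controllers also need
				 * dma_async_issue_pending(chan) */
	return 0;
}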
@@ -361,54 +525,174 @@ static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
361 return NETDEV_TX_OK; 525 return NETDEV_TX_OK;
362} 526}
363 527
364static void ks8842_rx_frame(struct net_device *netdev, 528static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status)
365 struct ks8842_adapter *adapter) 529{
530 netdev_dbg(netdev, "RX error, status: %x\n", status);
531
532 netdev->stats.rx_errors++;
533 if (status & RXSR_TOO_LONG)
534 netdev->stats.rx_length_errors++;
535 if (status & RXSR_CRC_ERROR)
536 netdev->stats.rx_crc_errors++;
537 if (status & RXSR_RUNT)
538 netdev->stats.rx_frame_errors++;
539}
540
541static void ks8842_update_rx_counters(struct net_device *netdev, u32 status,
542 int len)
543{
544 netdev_dbg(netdev, "RX packet, len: %d\n", len);
545
546 netdev->stats.rx_packets++;
547 netdev->stats.rx_bytes += len;
548 if (status & RXSR_MULTICAST)
549 netdev->stats.multicast++;
550}
551
552static int __ks8842_start_new_rx_dma(struct net_device *netdev)
366{ 553{
367 u32 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); 554 struct ks8842_adapter *adapter = netdev_priv(netdev);
368 int len = (status >> 16) & 0x7ff; 555 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
556 struct scatterlist *sg = &ctl->sg;
557 int err;
369 558
370 status &= 0xffff; 559 ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE);
560 if (ctl->skb) {
561 sg_init_table(sg, 1);
562 sg_dma_address(sg) = dma_map_single(adapter->dev,
563 ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
564 err = dma_mapping_error(adapter->dev, sg_dma_address(sg));
565 if (unlikely(err)) {
566 sg_dma_address(sg) = 0;
567 goto out;
568 }
569
570 sg_dma_len(sg) = DMA_BUFFER_SIZE;
571
572 ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
573 sg, 1, DMA_FROM_DEVICE,
574 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
575
576 if (!ctl->adesc)
577 goto out;
578
579 ctl->adesc->callback_param = netdev;
580 ctl->adesc->callback = ks8842_dma_rx_cb;
581 ctl->adesc->tx_submit(ctl->adesc);
582 } else {
583 err = -ENOMEM;
584 sg_dma_address(sg) = 0;
585 goto out;
586 }
371 587
372 dev_dbg(&adapter->pdev->dev, "%s - rx_data: status: %x\n", 588 return err;
373 __func__, status); 589out:
590 if (sg_dma_address(sg))
591 dma_unmap_single(adapter->dev, sg_dma_address(sg),
592 DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
593 sg_dma_address(sg) = 0;
594 if (ctl->skb)
595 dev_kfree_skb(ctl->skb);
596
597 ctl->skb = NULL;
598
599 printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
600 return err;
601}
602
603static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
604{
605 struct net_device *netdev = (struct net_device *)arg;
606 struct ks8842_adapter *adapter = netdev_priv(netdev);
607 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
608 struct sk_buff *skb = ctl->skb;
609 dma_addr_t addr = sg_dma_address(&ctl->sg);
610 u32 status;
611
612 ctl->adesc = NULL;
613
614 /* kick next transfer going */
615 __ks8842_start_new_rx_dma(netdev);
616
617 /* now handle the data we got */
618 dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
619
620 status = *((u32 *)skb->data);
621
622 netdev_dbg(netdev, "%s - rx_data: status: %x\n",
623 __func__, status & 0xffff);
374 624
375 /* check the status */ 625 /* check the status */
376 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 626 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
377 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len); 627 int len = (status >> 16) & 0x7ff;
378 628
379 dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n", 629 ks8842_update_rx_counters(netdev, status, len);
380 __func__, len);
381 if (skb) {
382 u32 *data;
383 630
384 netdev->stats.rx_packets++; 631 /* reserve 4 bytes which is the status word */
385 netdev->stats.rx_bytes += len; 632 skb_reserve(skb, 4);
386 if (status & RXSR_MULTICAST) 633 skb_put(skb, len);
387 netdev->stats.multicast++;
388 634
389 data = (u32 *)skb_put(skb, len); 635 skb->protocol = eth_type_trans(skb, netdev);
636 netif_rx(skb);
637 } else {
638 ks8842_update_rx_err_counters(netdev, status);
639 dev_kfree_skb(skb);
640 }
641}
390 642
391 ks8842_select_bank(adapter, 17); 643static void ks8842_rx_frame(struct net_device *netdev,
392 while (len > 0) { 644 struct ks8842_adapter *adapter)
393 *data++ = ioread32(adapter->hw_addr + 645{
394 REG_QMU_DATA_LO); 646 u32 status;
395 len -= sizeof(u32); 647 int len;
396 } 648
649 if (adapter->conf_flags & KS884X_16BIT) {
650 status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO);
651 len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI);
652 netdev_dbg(netdev, "%s - rx_data: status: %x\n",
653 __func__, status);
654 } else {
655 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO);
656 len = (status >> 16) & 0x7ff;
657 status &= 0xffff;
658 netdev_dbg(netdev, "%s - rx_data: status: %x\n",
659 __func__, status);
660 }
397 661
662 /* check the status */
663 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
664 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
665
666 if (skb) {
667
668 ks8842_update_rx_counters(netdev, status, len);
669
670 if (adapter->conf_flags & KS884X_16BIT) {
671 u16 *data16 = (u16 *)skb_put(skb, len);
672 ks8842_select_bank(adapter, 17);
673 while (len > 0) {
674 *data16++ = ioread16(adapter->hw_addr +
675 REG_QMU_DATA_LO);
676 *data16++ = ioread16(adapter->hw_addr +
677 REG_QMU_DATA_HI);
678 len -= sizeof(u32);
679 }
680 } else {
681 u32 *data = (u32 *)skb_put(skb, len);
682
683 ks8842_select_bank(adapter, 17);
684 while (len > 0) {
685 *data++ = ioread32(adapter->hw_addr +
686 REG_QMU_DATA_LO);
687 len -= sizeof(u32);
688 }
689 }
398 skb->protocol = eth_type_trans(skb, netdev); 690 skb->protocol = eth_type_trans(skb, netdev);
399 netif_rx(skb); 691 netif_rx(skb);
400 } else 692 } else
401 netdev->stats.rx_dropped++; 693 netdev->stats.rx_dropped++;
402 } else { 694 } else
403 dev_dbg(&adapter->pdev->dev, "RX error, status: %x\n", status); 695 ks8842_update_rx_err_counters(netdev, status);
404 netdev->stats.rx_errors++;
405 if (status & RXSR_TOO_LONG)
406 netdev->stats.rx_length_errors++;
407 if (status & RXSR_CRC_ERROR)
408 netdev->stats.rx_crc_errors++;
409 if (status & RXSR_RUNT)
410 netdev->stats.rx_frame_errors++;
411 }
412 696
413 /* set high watermark to 3K */ 697 /* set high watermark to 3K */
414 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); 698 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
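[Editor's note] The RX side above recycles one DMA-mapped skb per transfer: the DMA callback only schedules a tasklet, and the tasklet unmaps the buffer, parses the hardware status word that precedes the frame, hands the skb to the stack and arms the next transfer. A small sketch of the completion half is below; the status-word layout matches the driver, while my_rx_complete() and its arguments are illustrative (the driver additionally checks the RXSR error bits).

/*
 * Sketch of the tasklet side of the RX-DMA recycling above; names and
 * argument list are illustrative.
 */
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void my_rx_complete(struct device *dev, struct net_device *netdev,
			   struct sk_buff *skb, dma_addr_t addr, size_t bufsz)
{
	u32 status;

	/* give ownership of the buffer back to the CPU before touching it */
	dma_unmap_single(dev, addr, bufsz, DMA_FROM_DEVICE);

	status = *(u32 *)skb->data;	/* hardware prepends a status word */
	if (status & 0x8000 /* RXSR_VALID */) {
		int len = (status >> 16) & 0x7ff;

		skb_reserve(skb, 4);	/* strip the status word */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, netdev);
		netif_rx(skb);
	} else {
		dev_kfree_skb(skb);
	}
	/* then map a fresh skb and submit the next slave descriptor */
}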
@@ -423,8 +707,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
423void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter) 707void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
424{ 708{
425 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; 709 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
426 dev_dbg(&adapter->pdev->dev, "%s Entry - rx_data: %d\n", 710 netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
427 __func__, rx_data);
428 while (rx_data) { 711 while (rx_data) {
429 ks8842_rx_frame(netdev, adapter); 712 ks8842_rx_frame(netdev, adapter);
430 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; 713 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
@@ -434,7 +717,7 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
434void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter) 717void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
435{ 718{
436 u16 sr = ks8842_read16(adapter, 16, REG_TXSR); 719 u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
437 dev_dbg(&adapter->pdev->dev, "%s - entry, sr: %x\n", __func__, sr); 720 netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
438 netdev->stats.tx_packets++; 721 netdev->stats.tx_packets++;
439 if (netif_queue_stopped(netdev)) 722 if (netif_queue_stopped(netdev))
440 netif_wake_queue(netdev); 723 netif_wake_queue(netdev);
@@ -443,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
443void ks8842_handle_rx_overrun(struct net_device *netdev, 726void ks8842_handle_rx_overrun(struct net_device *netdev,
444 struct ks8842_adapter *adapter) 727 struct ks8842_adapter *adapter)
445{ 728{
446 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 729 netdev_dbg(netdev, "%s: entry\n", __func__);
447 netdev->stats.rx_errors++; 730 netdev->stats.rx_errors++;
448 netdev->stats.rx_fifo_errors++; 731 netdev->stats.rx_fifo_errors++;
449} 732}
@@ -462,20 +745,32 @@ void ks8842_tasklet(unsigned long arg)
462 spin_unlock_irqrestore(&adapter->lock, flags); 745 spin_unlock_irqrestore(&adapter->lock, flags);
463 746
464 isr = ks8842_read16(adapter, 18, REG_ISR); 747 isr = ks8842_read16(adapter, 18, REG_ISR);
465 dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr); 748 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
749
 750	/* when running in DMA mode, do not ack RX interrupts; they are handled
 751	   internally by timberdale, otherwise its DMA FIFOs would stop
752 */
753 if (KS8842_USE_DMA(adapter))
754 isr &= ~IRQ_RX;
466 755
467 /* Ack */ 756 /* Ack */
468 ks8842_write16(adapter, 18, isr, REG_ISR); 757 ks8842_write16(adapter, 18, isr, REG_ISR);
469 758
759 if (!(adapter->conf_flags & MICREL_KS884X))
760 /* Ack in the timberdale IP as well */
761 iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR);
762
470 if (!netif_running(netdev)) 763 if (!netif_running(netdev))
471 return; 764 return;
472 765
473 if (isr & IRQ_LINK_CHANGE) 766 if (isr & IRQ_LINK_CHANGE)
474 ks8842_update_link_status(netdev, adapter); 767 ks8842_update_link_status(netdev, adapter);
475 768
476 if (isr & (IRQ_RX | IRQ_RX_ERROR)) 769 /* should not get IRQ_RX when running DMA mode */
770 if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter))
477 ks8842_handle_rx(netdev, adapter); 771 ks8842_handle_rx(netdev, adapter);
478 772
773 /* should only happen when in PIO mode */
479 if (isr & IRQ_TX) 774 if (isr & IRQ_TX)
480 ks8842_handle_tx(netdev, adapter); 775 ks8842_handle_tx(netdev, adapter);
481 776
@@ -494,24 +789,38 @@ void ks8842_tasklet(unsigned long arg)
494 789
495 /* re-enable interrupts, put back the bank selection register */ 790 /* re-enable interrupts, put back the bank selection register */
496 spin_lock_irqsave(&adapter->lock, flags); 791 spin_lock_irqsave(&adapter->lock, flags);
497 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 792 if (KS8842_USE_DMA(adapter))
793 ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER);
794 else
795 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER);
498 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 796 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
797
 798	/* Make sure timberdale continues DMA operations; they are stopped while
799 we are handling the ks8842 because we might change bank */
800 if (KS8842_USE_DMA(adapter))
801 ks8842_resume_dma(adapter);
802
499 spin_unlock_irqrestore(&adapter->lock, flags); 803 spin_unlock_irqrestore(&adapter->lock, flags);
500} 804}
501 805
502static irqreturn_t ks8842_irq(int irq, void *devid) 806static irqreturn_t ks8842_irq(int irq, void *devid)
503{ 807{
504 struct ks8842_adapter *adapter = devid; 808 struct net_device *netdev = devid;
809 struct ks8842_adapter *adapter = netdev_priv(netdev);
505 u16 isr; 810 u16 isr;
506 u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); 811 u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK);
507 irqreturn_t ret = IRQ_NONE; 812 irqreturn_t ret = IRQ_NONE;
508 813
509 isr = ks8842_read16(adapter, 18, REG_ISR); 814 isr = ks8842_read16(adapter, 18, REG_ISR);
510 dev_dbg(&adapter->pdev->dev, "%s - ISR: 0x%x\n", __func__, isr); 815 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr);
511 816
512 if (isr) { 817 if (isr) {
513 /* disable IRQ */ 818 if (KS8842_USE_DMA(adapter))
514 ks8842_write16(adapter, 18, 0x00, REG_IER); 819 /* disable all but RX IRQ, since the FPGA relies on it*/
820 ks8842_write16(adapter, 18, IRQ_RX, REG_IER);
821 else
822 /* disable IRQ */
823 ks8842_write16(adapter, 18, 0x00, REG_IER);
515 824
516 /* schedule tasklet */ 825 /* schedule tasklet */
517 tasklet_schedule(&adapter->tasklet); 826 tasklet_schedule(&adapter->tasklet);
@@ -521,9 +830,151 @@ static irqreturn_t ks8842_irq(int irq, void *devid)
521 830
522 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 831 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK);
523 832
833 /* After an interrupt, tell timberdale to continue DMA operations.
834 DMA is disabled while we are handling the ks8842 because we might
835 change bank */
836 ks8842_resume_dma(adapter);
837
524 return ret; 838 return ret;
525} 839}
526 840
841static void ks8842_dma_rx_cb(void *data)
842{
843 struct net_device *netdev = data;
844 struct ks8842_adapter *adapter = netdev_priv(netdev);
845
846 netdev_dbg(netdev, "RX DMA finished\n");
847 /* schedule tasklet */
848 if (adapter->dma_rx.adesc)
849 tasklet_schedule(&adapter->dma_rx.tasklet);
850}
851
852static void ks8842_dma_tx_cb(void *data)
853{
854 struct net_device *netdev = data;
855 struct ks8842_adapter *adapter = netdev_priv(netdev);
856 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx;
857
858 netdev_dbg(netdev, "TX DMA finished\n");
859
860 if (!ctl->adesc)
861 return;
862
863 netdev->stats.tx_packets++;
864 ctl->adesc = NULL;
865
866 if (netif_queue_stopped(netdev))
867 netif_wake_queue(netdev);
868}
869
870static void ks8842_stop_dma(struct ks8842_adapter *adapter)
871{
872 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
873 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
874
875 tx_ctl->adesc = NULL;
876 if (tx_ctl->chan)
877 tx_ctl->chan->device->device_control(tx_ctl->chan,
878 DMA_TERMINATE_ALL, 0);
879
880 rx_ctl->adesc = NULL;
881 if (rx_ctl->chan)
882 rx_ctl->chan->device->device_control(rx_ctl->chan,
883 DMA_TERMINATE_ALL, 0);
884
885 if (sg_dma_address(&rx_ctl->sg))
886 dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg),
887 DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
888 sg_dma_address(&rx_ctl->sg) = 0;
889
890 dev_kfree_skb(rx_ctl->skb);
891 rx_ctl->skb = NULL;
892}
893
894static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter)
895{
896 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
897 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
898
899 ks8842_stop_dma(adapter);
900
901 if (tx_ctl->chan)
902 dma_release_channel(tx_ctl->chan);
903 tx_ctl->chan = NULL;
904
905 if (rx_ctl->chan)
906 dma_release_channel(rx_ctl->chan);
907 rx_ctl->chan = NULL;
908
909 tasklet_kill(&rx_ctl->tasklet);
910
911 if (sg_dma_address(&tx_ctl->sg))
912 dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg),
913 DMA_BUFFER_SIZE, DMA_TO_DEVICE);
914 sg_dma_address(&tx_ctl->sg) = 0;
915
916 kfree(tx_ctl->buf);
917 tx_ctl->buf = NULL;
918}
919
920static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param)
921{
922 return chan->chan_id == (long)filter_param;
923}
924
925static int ks8842_alloc_dma_bufs(struct net_device *netdev)
926{
927 struct ks8842_adapter *adapter = netdev_priv(netdev);
928 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx;
929 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx;
930 int err;
931
932 dma_cap_mask_t mask;
933
934 dma_cap_zero(mask);
935 dma_cap_set(DMA_SLAVE, mask);
936 dma_cap_set(DMA_PRIVATE, mask);
937
938 sg_init_table(&tx_ctl->sg, 1);
939
940 tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
941 (void *)(long)tx_ctl->channel);
942 if (!tx_ctl->chan) {
943 err = -ENODEV;
944 goto err;
945 }
946
947 /* allocate DMA buffer */
948 tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL);
949 if (!tx_ctl->buf) {
950 err = -ENOMEM;
951 goto err;
952 }
953
954 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
955 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
956 err = dma_mapping_error(adapter->dev,
957 sg_dma_address(&tx_ctl->sg));
958 if (err) {
959 sg_dma_address(&tx_ctl->sg) = 0;
960 goto err;
961 }
962
963 rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn,
964 (void *)(long)rx_ctl->channel);
965 if (!rx_ctl->chan) {
966 err = -ENODEV;
967 goto err;
968 }
969
970 tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
971 (unsigned long)netdev);
972
973 return 0;
974err:
975 ks8842_dealloc_dma_bufs(adapter);
976 return err;
977}
527 978
528/* Netdevice operations */ 979/* Netdevice operations */
529 980
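[Editor's note] ks8842_alloc_dma_bufs() above reserves the two timberdale slave channels by channel number, using dma_request_channel() with a filter callback and a DMA_SLAVE | DMA_PRIVATE capability mask. A minimal sketch of that idiom, under hypothetical my_* names:

#include <linux/dmaengine.h>

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* only accept the channel whose id was passed in 'param' */
	return chan->chan_id == (long)param;
}

static struct dma_chan *my_request_chan(int id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_PRIVATE, mask);	/* do not share the channel */

	return dma_request_channel(mask, my_filter, (void *)(long)id);
}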
@@ -532,7 +983,26 @@ static int ks8842_open(struct net_device *netdev)
532 struct ks8842_adapter *adapter = netdev_priv(netdev); 983 struct ks8842_adapter *adapter = netdev_priv(netdev);
533 int err; 984 int err;
534 985
535 dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__); 986 netdev_dbg(netdev, "%s - entry\n", __func__);
987
988 if (KS8842_USE_DMA(adapter)) {
989 err = ks8842_alloc_dma_bufs(netdev);
990
991 if (!err) {
992 /* start RX dma */
993 err = __ks8842_start_new_rx_dma(netdev);
994 if (err)
995 ks8842_dealloc_dma_bufs(adapter);
996 }
997
998 if (err) {
999 printk(KERN_WARNING DRV_NAME
1000 ": Failed to initiate DMA, running PIO\n");
1001 ks8842_dealloc_dma_bufs(adapter);
1002 adapter->dma_rx.channel = -1;
1003 adapter->dma_tx.channel = -1;
1004 }
1005 }
536 1006
537 /* reset the HW */ 1007 /* reset the HW */
538 ks8842_reset_hw(adapter); 1008 ks8842_reset_hw(adapter);
@@ -542,7 +1012,7 @@ static int ks8842_open(struct net_device *netdev)
542 ks8842_update_link_status(netdev, adapter); 1012 ks8842_update_link_status(netdev, adapter);
543 1013
544 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, 1014 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
545 adapter); 1015 netdev);
546 if (err) { 1016 if (err) {
547 pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); 1017 pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
548 return err; 1018 return err;
@@ -555,10 +1025,15 @@ static int ks8842_close(struct net_device *netdev)
555{ 1025{
556 struct ks8842_adapter *adapter = netdev_priv(netdev); 1026 struct ks8842_adapter *adapter = netdev_priv(netdev);
557 1027
558 dev_dbg(&adapter->pdev->dev, "%s - entry\n", __func__); 1028 netdev_dbg(netdev, "%s - entry\n", __func__);
1029
1030 cancel_work_sync(&adapter->timeout_work);
1031
1032 if (KS8842_USE_DMA(adapter))
1033 ks8842_dealloc_dma_bufs(adapter);
559 1034
560 /* free the irq */ 1035 /* free the irq */
561 free_irq(adapter->irq, adapter); 1036 free_irq(adapter->irq, netdev);
562 1037
563 /* disable the switch */ 1038 /* disable the switch */
564 ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE); 1039 ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE);
@@ -572,7 +1047,18 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
572 int ret; 1047 int ret;
573 struct ks8842_adapter *adapter = netdev_priv(netdev); 1048 struct ks8842_adapter *adapter = netdev_priv(netdev);
574 1049
575 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 1050 netdev_dbg(netdev, "%s: entry\n", __func__);
1051
1052 if (KS8842_USE_DMA(adapter)) {
1053 unsigned long flags;
1054 ret = ks8842_tx_frame_dma(skb, netdev);
 1055		/* for now only allow one transfer at a time */
1056 spin_lock_irqsave(&adapter->lock, flags);
1057 if (adapter->dma_tx.adesc)
1058 netif_stop_queue(netdev);
1059 spin_unlock_irqrestore(&adapter->lock, flags);
1060 return ret;
1061 }
576 1062
577 ret = ks8842_tx_frame(skb, netdev); 1063 ret = ks8842_tx_frame(skb, netdev);
578 1064
@@ -588,7 +1074,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
588 struct sockaddr *addr = p; 1074 struct sockaddr *addr = p;
589 char *mac = (u8 *)addr->sa_data; 1075 char *mac = (u8 *)addr->sa_data;
590 1076
591 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 1077 netdev_dbg(netdev, "%s: entry\n", __func__);
592 1078
593 if (!is_valid_ether_addr(addr->sa_data)) 1079 if (!is_valid_ether_addr(addr->sa_data))
594 return -EADDRNOTAVAIL; 1080 return -EADDRNOTAVAIL;
@@ -599,17 +1085,26 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
599 return 0; 1085 return 0;
600} 1086}
601 1087
602static void ks8842_tx_timeout(struct net_device *netdev) 1088static void ks8842_tx_timeout_work(struct work_struct *work)
603{ 1089{
604 struct ks8842_adapter *adapter = netdev_priv(netdev); 1090 struct ks8842_adapter *adapter =
1091 container_of(work, struct ks8842_adapter, timeout_work);
1092 struct net_device *netdev = adapter->netdev;
605 unsigned long flags; 1093 unsigned long flags;
606 1094
607 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 1095 netdev_dbg(netdev, "%s: entry\n", __func__);
608 1096
609 spin_lock_irqsave(&adapter->lock, flags); 1097 spin_lock_irqsave(&adapter->lock, flags);
1098
1099 if (KS8842_USE_DMA(adapter))
1100 ks8842_stop_dma(adapter);
1101
610 /* disable interrupts */ 1102 /* disable interrupts */
611 ks8842_write16(adapter, 18, 0, REG_IER); 1103 ks8842_write16(adapter, 18, 0, REG_IER);
612 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); 1104 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR);
1105
1106 netif_stop_queue(netdev);
1107
613 spin_unlock_irqrestore(&adapter->lock, flags); 1108 spin_unlock_irqrestore(&adapter->lock, flags);
614 1109
615 ks8842_reset_hw(adapter); 1110 ks8842_reset_hw(adapter);
@@ -617,6 +1112,18 @@ static void ks8842_tx_timeout(struct net_device *netdev)
617 ks8842_write_mac_addr(adapter, netdev->dev_addr); 1112 ks8842_write_mac_addr(adapter, netdev->dev_addr);
618 1113
619 ks8842_update_link_status(netdev, adapter); 1114 ks8842_update_link_status(netdev, adapter);
1115
1116 if (KS8842_USE_DMA(adapter))
1117 __ks8842_start_new_rx_dma(netdev);
1118}
1119
1120static void ks8842_tx_timeout(struct net_device *netdev)
1121{
1122 struct ks8842_adapter *adapter = netdev_priv(netdev);
1123
1124 netdev_dbg(netdev, "%s: entry\n", __func__);
1125
1126 schedule_work(&adapter->timeout_work);
620} 1127}
621 1128
622static const struct net_device_ops ks8842_netdev_ops = { 1129static const struct net_device_ops ks8842_netdev_ops = {
@@ -653,7 +1160,11 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
653 SET_NETDEV_DEV(netdev, &pdev->dev); 1160 SET_NETDEV_DEV(netdev, &pdev->dev);
654 1161
655 adapter = netdev_priv(netdev); 1162 adapter = netdev_priv(netdev);
1163 adapter->netdev = netdev;
1164 INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work);
656 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); 1165 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem));
1166 adapter->conf_flags = iomem->flags;
1167
657 if (!adapter->hw_addr) 1168 if (!adapter->hw_addr)
658 goto err_ioremap; 1169 goto err_ioremap;
659 1170
@@ -663,7 +1174,18 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
663 goto err_get_irq; 1174 goto err_get_irq;
664 } 1175 }
665 1176
666 adapter->pdev = pdev; 1177 adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev;
1178
1179 /* DMA is only supported when accessed via timberdale */
1180 if (!(adapter->conf_flags & MICREL_KS884X) && pdata &&
1181 (pdata->tx_dma_channel != -1) &&
1182 (pdata->rx_dma_channel != -1)) {
1183 adapter->dma_rx.channel = pdata->rx_dma_channel;
1184 adapter->dma_tx.channel = pdata->tx_dma_channel;
1185 } else {
1186 adapter->dma_rx.channel = -1;
1187 adapter->dma_tx.channel = -1;
1188 }
667 1189
668 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); 1190 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
669 spin_lock_init(&adapter->lock); 1191 spin_lock_init(&adapter->lock);