Diffstat (limited to 'drivers/net/arm')
 -rw-r--r--  drivers/net/arm/ks8695net.c  |  103
 1 file changed, 78 insertions(+), 25 deletions(-)
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 2a7b7745cc55..ed0b0f3b7122 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -35,12 +35,15 @@
 
 #include <mach/regs-switch.h>
 #include <mach/regs-misc.h>
+#include <asm/mach/irq.h>
+#include <mach/regs-irq.h>
 
 #include "ks8695net.h"
 
 #define MODULENAME "ks8695_ether"
 #define MODULEVERSION "1.01"
 
+
 /*
  * Transmit and device reset timeout, default 5 seconds.
  */
@@ -152,6 +155,8 @@ struct ks8695_priv {
 	enum ks8695_dtype dtype;
 	void __iomem *io_regs;
 
+	struct napi_struct napi;
+
 	const char *rx_irq_name, *tx_irq_name, *link_irq_name;
 	int rx_irq, tx_irq, link_irq;
 
@@ -172,6 +177,7 @@ struct ks8695_priv {
 	dma_addr_t rx_ring_dma;
 	struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
 	int next_rx_desc_read;
+	spinlock_t rx_lock;
 
 	int msg_enable;
 };
@@ -396,25 +402,53 @@ ks8695_tx_irq(int irq, void *dev_id)
  * @irq: The IRQ which went off (ignored)
  * @dev_id: The net_device for the interrupt
  *
- * Process the RX ring, passing any received packets up to the
- * host. If we received anything other than errors, we then
- * refill the ring.
+ * Use NAPI to receive packets.
  */
+
 static irqreturn_t
 ks8695_rx_irq(int irq, void *dev_id)
 {
 	struct net_device *ndev = (struct net_device *)dev_id;
 	struct ks8695_priv *ksp = netdev_priv(ndev);
+	unsigned long status;
+
+	unsigned long mask_bit = 1 << ksp->rx_irq;
+
+	spin_lock(&ksp->rx_lock);
+
+	status = readl(KS8695_IRQ_VA + KS8695_INTST);
+
+	/*clean rx status bit*/
+	writel(status | mask_bit , KS8695_IRQ_VA + KS8695_INTST);
+
+	if (status & mask_bit) {
+		if (napi_schedule_prep(&ksp->napi)) {
+			/*disable rx interrupt*/
+			status &= ~mask_bit;
+			writel(status , KS8695_IRQ_VA + KS8695_INTEN);
+			__napi_schedule(&ksp->napi);
+		}
+	}
+
+	spin_unlock(&ksp->rx_lock);
+	return IRQ_HANDLED;
+}
+
+static int ks8695_rx(struct net_device *ndev, int budget)
+{
+	struct ks8695_priv *ksp = netdev_priv(ndev);
 	struct sk_buff *skb;
 	int buff_n;
 	u32 flags;
 	int pktlen;
 	int last_rx_processed = -1;
+	int received = 0;
 
 	buff_n = ksp->next_rx_desc_read;
-	do {
-		if (ksp->rx_buffers[buff_n].skb &&
-		    !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) {
+	while (received < budget
+			&& ksp->rx_buffers[buff_n].skb
+			&& (!(ksp->rx_ring[buff_n].status &
+					cpu_to_le32(RDES_OWN)))) {
 			rmb();
 			flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
 			/* Found an SKB which we own, this means we
@@ -464,7 +498,7 @@ ks8695_rx_irq(int irq, void *dev_id)
 			/* Relinquish the SKB to the network layer */
 			skb_put(skb, pktlen);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_rx(skb);
+			netif_receive_skb(skb);
 
 			/* Record stats */
 			ndev->stats.rx_packets++;
@@ -478,29 +512,44 @@ rx_failure:
 			/* Give the ring entry back to the hardware */
 			ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
 rx_finished:
+			received++;
 			/* And note this as processed so we can start
 			 * from here next time
 			 */
 			last_rx_processed = buff_n;
-		} else {
-			/* Ran out of things to process, stop now */
-			break;
-		}
-		buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
-	} while (buff_n != ksp->next_rx_desc_read);
+			buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
+			/*And note which RX descriptor we last did */
+			if (likely(last_rx_processed != -1))
+				ksp->next_rx_desc_read =
+					(last_rx_processed + 1) &
+					MAX_RX_DESC_MASK;
 
-	/* And note which RX descriptor we last did anything with */
-	if (likely(last_rx_processed != -1))
-		ksp->next_rx_desc_read =
-			(last_rx_processed + 1) & MAX_RX_DESC_MASK;
-
-	/* And refill the buffers */
-	ks8695_refill_rxbuffers(ksp);
-
-	/* Kick the RX DMA engine, in case it became suspended */
-	ks8695_writereg(ksp, KS8695_DRSC, 0);
+			/* And refill the buffers */
+			ks8695_refill_rxbuffers(ksp);
+	}
+	return received;
+}
 
-	return IRQ_HANDLED;
+static int ks8695_poll(struct napi_struct *napi, int budget)
+{
+	struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
+	struct net_device *dev = ksp->ndev;
+	unsigned long mask_bit = 1 << ksp->rx_irq;
+	unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
+
+	unsigned long work_done ;
+
+	work_done = ks8695_rx(dev, budget);
+
+	if (work_done < budget) {
+		unsigned long flags;
+		spin_lock_irqsave(&ksp->rx_lock, flags);
+		/*enable rx interrupt*/
+		writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
+		__napi_complete(napi);
+		spin_unlock_irqrestore(&ksp->rx_lock, flags);
+	}
+	return work_done;
 }
 
 /**
@@ -1472,6 +1521,8 @@ ks8695_probe(struct platform_device *pdev)
 	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
 
+	netif_napi_add(ndev, &ksp->napi, ks8695_poll, 64);
+
 	/* Retrieve the default MAC addr from the chip. */
 	/* The bootloader should have left it in there for us. */
 
@@ -1505,6 +1556,7 @@ ks8695_probe(struct platform_device *pdev)
 
 	/* And initialise the queue's lock */
 	spin_lock_init(&ksp->txq_lock);
+	spin_lock_init(&ksp->rx_lock);
 
 	/* Specify the RX DMA ring buffer */
 	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
@@ -1626,6 +1678,7 @@ ks8695_drv_remove(struct platform_device *pdev)
 	struct ks8695_priv *ksp = netdev_priv(ndev);
 
 	platform_set_drvdata(pdev, NULL);
+	netif_napi_del(&ksp->napi);
 
 	unregister_netdev(ndev);
 	ks8695_release_device(ksp);
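
Note: the patch follows the standard NAPI conversion pattern of its era: the RX interrupt handler masks the RX source and schedules the poll routine, the poll routine drains up to `budget` packets with netif_receive_skb(), and the interrupt is re-enabled once a poll finishes under budget. The sketch below is illustrative only, not the driver's code: it uses the 4-argument netif_napi_add() and generic NAPI calls of this kernel generation, and the my_* names, the stub RX routine, and the placement of interrupt masking are assumptions.

/*
 * Minimal NAPI skeleton (illustrative; my_* names are hypothetical).
 */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

struct my_priv {
	struct napi_struct napi;
	struct net_device *ndev;
};

/* Stub for illustration: a real driver drains its RX ring here, handing
 * packets to netif_receive_skb() and returning how many it processed. */
static int my_rx(struct net_device *ndev, int budget)
{
	return 0;
}

static irqreturn_t my_rx_irq(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct my_priv *priv = netdev_priv(ndev);

	if (napi_schedule_prep(&priv->napi)) {
		/* Mask the RX interrupt source here, then hand off to poll. */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_rx(priv->ndev, budget);

	if (work_done < budget) {
		/* Under budget: leave polled mode and unmask the RX interrupt. */
		napi_complete(napi);
	}
	return work_done;
}

/* In probe:  netif_napi_add(ndev, &priv->napi, my_poll, 64);
 * In open:   napi_enable(&priv->napi);
 * In stop:   napi_disable(&priv->napi);
 * In remove: netif_napi_del(&priv->napi);
 */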