Diffstat (limited to 'drivers/net/korina.c')
-rw-r--r--  drivers/net/korina.c | 173
1 file changed, 89 insertions(+), 84 deletions(-)
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 4a5580c1126a..75010cac76ac 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -84,7 +84,10 @@
 #define KORINA_NUM_RDS 64 /* number of receive descriptors */
 #define KORINA_NUM_TDS 64 /* number of transmit descriptors */
 
-#define KORINA_RBSIZE 536 /* size of one resource buffer = Ether MTU */
+/* KORINA_RBSIZE is the hardware's default maximum receive
+ * frame size in bytes. Having this hardcoded means that there
+ * is no support for MTU sizes greater than 1500. */
+#define KORINA_RBSIZE 1536 /* size of one resource buffer = Ether MTU */
 #define KORINA_RDS_MASK (KORINA_NUM_RDS - 1)
 #define KORINA_TDS_MASK (KORINA_NUM_TDS - 1)
 #define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc))
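
KORINA_RBSIZE sizing, for reference: a maximal frame on a 1500-byte-MTU link is 1500 bytes of payload plus a 14-byte Ethernet header and a 4-byte FCS, 1518 bytes on the wire, which the driver rounds up to 1536; the old value of 536 looks like a dropped leading digit. A minimal sketch of the arithmetic (all names except KORINA_RBSIZE are illustrative, not from the driver):

    /* 1500 (payload) + 14 (Ethernet header) + 4 (FCS) = 1518 bytes;
     * 1536 rounds that up and still leaves room for the 2-byte
     * alignment reserve the driver adds at allocation time
     * (netdev_alloc_skb(dev, KORINA_RBSIZE + 2)). */
    #define EXAMPLE_ETH_MTU   1500
    #define EXAMPLE_ETH_HLEN  14
    #define EXAMPLE_ETH_FCS   4
    #define EXAMPLE_MAX_FRAME (EXAMPLE_ETH_MTU + EXAMPLE_ETH_HLEN + EXAMPLE_ETH_FCS)
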
@@ -196,7 +199,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 	struct korina_private *lp = netdev_priv(dev);
 	unsigned long flags;
 	u32 length;
-	u32 chain_index;
+	u32 chain_prev, chain_next;
 	struct dma_desc *td;
 
 	spin_lock_irqsave(&lp->lock, flags);
@@ -228,8 +231,8 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 	/* Setup the transmit descriptor. */
 	dma_cache_inv((u32) td, sizeof(*td));
 	td->ca = CPHYSADDR(skb->data);
-	chain_index = (lp->tx_chain_tail - 1) &
-			KORINA_TDS_MASK;
+	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
+	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
 
 	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
 		if (lp->tx_chain_status == desc_empty) {
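
The rename is more than cosmetic: the old code computed a single chain_index = tail - 1 and used it both to relink the previous descriptor and to advance the tail, so the tail pointer effectively moved backwards. Splitting it into chain_prev (for linking) and chain_next (for the new tail) fixes that. Both expressions depend on the ring sizes being powers of two, which turns the AND with the mask into a modulo; a stand-alone model of the arithmetic (names are illustrative):

    #include <stdio.h>

    #define NUM_TDS  64                 /* must be a power of two */
    #define TDS_MASK (NUM_TDS - 1)

    int main(void)
    {
        unsigned int tail = 0;
        /* unsigned wraparound makes the -1 case work too */
        unsigned int prev = (tail - 1) & TDS_MASK;  /* 63, the last slot */
        unsigned int next = (tail + 1) & TDS_MASK;  /* 1 */

        printf("prev=%u next=%u\n", prev, next);
        return 0;
    }
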
@@ -237,7 +240,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			/* Write to NDPTR */
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 					&lp->tx_dma_regs->dmandptr);
@@ -248,12 +251,12 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Link to prev */
-			lp->td_ring[chain_index].control &=
+			lp->td_ring[chain_prev].control &=
 					~DMA_DESC_COF;
 			/* Link to prev */
-			lp->td_ring[chain_index].link = CPHYSADDR(td);
+			lp->td_ring[chain_prev].link = CPHYSADDR(td);
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			/* Write to NDPTR */
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 					&(lp->tx_dma_regs->dmandptr));
@@ -267,17 +270,16 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
 			/* Move tail */
-			lp->tx_chain_tail = chain_index;
+			lp->tx_chain_tail = chain_next;
 			lp->tx_chain_status = desc_filled;
-			netif_stop_queue(dev);
 		} else {
 			/* Update tail */
 			td->control = DMA_COUNT(length) |
 					DMA_DESC_COF | DMA_DESC_IOF;
-			lp->td_ring[chain_index].control &=
+			lp->td_ring[chain_prev].control &=
 					~DMA_DESC_COF;
-			lp->td_ring[chain_index].link = CPHYSADDR(td);
-			lp->tx_chain_tail = chain_index;
+			lp->td_ring[chain_prev].link = CPHYSADDR(td);
+			lp->tx_chain_tail = chain_next;
 		}
 	}
 	dma_cache_wback((u32) td, sizeof(*td));
@@ -327,13 +329,13 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
 
 	dmas = readl(&lp->rx_dma_regs->dmas);
 	if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
-		netif_rx_schedule_prep(&lp->napi);
-
 		dmasm = readl(&lp->rx_dma_regs->dmasm);
 		writel(dmasm | (DMA_STAT_DONE |
 				DMA_STAT_HALT | DMA_STAT_ERR),
 				&lp->rx_dma_regs->dmasm);
 
+		netif_rx_schedule(&lp->napi);
+
 		if (dmas & DMA_STAT_ERR)
 			printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
 
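
The reorder follows the interrupt-side NAPI idiom: silence the device's own Rx DMA interrupt sources first, then schedule the poll routine. netif_rx_schedule() already performs the prep test internally, so the stray netif_rx_schedule_prep() call, whose return value was never checked, can go; scheduling before masking also left a window in which the still-unmasked interrupt could fire again. A condensed sketch of the resulting handler shape, assuming the single-argument netif_rx_schedule() of this kernel generation; the helper names are illustrative, not from the driver:

    static irqreturn_t example_rx_irq(int irq, void *dev_id)
    {
        struct net_device *dev = dev_id;
        struct example_priv *lp = netdev_priv(dev);

        if (!rx_event_pending(lp))        /* illustrative check */
            return IRQ_NONE;
        mask_rx_dma_irqs(lp);             /* 1: mask at the device */
        netif_rx_schedule(&lp->napi);     /* 2: defer to the poller */
        return IRQ_HANDLED;
    }
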
@@ -350,15 +352,20 @@ static int korina_rx(struct net_device *dev, int limit)
 	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
 	struct sk_buff *skb, *skb_new;
 	u8 *pkt_buf;
-	u32 devcs, pkt_len, dmas, rx_free_desc;
+	u32 devcs, pkt_len, dmas;
 	int count;
 
 	dma_cache_inv((u32)rd, sizeof(*rd));
 
 	for (count = 0; count < limit; count++) {
+		skb = lp->rx_skb[lp->rx_next_done];
+		skb_new = NULL;
 
 		devcs = rd->devcs;
 
+		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
+			break;
+
 		/* Update statistics counters */
 		if (devcs & ETH_RX_CRC)
 			dev->stats.rx_crc_errors++;
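
The added early break relies on what DMA_COUNT() extracts from the control word: the descriptor's byte-count field, which the DMA engine apparently counts down as it stores data, so a count still equal to KORINA_RBSIZE marks a descriptor that has received nothing yet and ends the poll pass. A sketch of the assumed macro shape (the field width here is hypothetical; the real mask is defined in the RC32434 headers):

    #define EXAMPLE_DMA_COUNT_MSK  0x0003ffff  /* hypothetical field width */
    #define EXAMPLE_DMA_COUNT(ctl) ((ctl) & EXAMPLE_DMA_COUNT_MSK)
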
@@ -381,63 +388,58 @@ static int korina_rx(struct net_device *dev, int limit)
 			 * in Rc32434 (errata ref #077) */
 			dev->stats.rx_errors++;
 			dev->stats.rx_dropped++;
-		}
-
-		while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
-			/* init the var. used for the later
-			 * operations within the while loop */
-			skb_new = NULL;
-			pkt_len = RCVPKT_LENGTH(devcs);
-			skb = lp->rx_skb[lp->rx_next_done];
-
-			if ((devcs & ETH_RX_ROK)) {
-				/* must be the (first and) last
-				 * descriptor then */
-				pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
-
-				/* invalidate the cache */
-				dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
-
-				/* Malloc up new buffer. */
-				skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
-
-				if (!skb_new)
-					break;
-				/* Do not count the CRC */
-				skb_put(skb, pkt_len - 4);
-				skb->protocol = eth_type_trans(skb, dev);
-
-				/* Pass the packet to upper layers */
-				netif_receive_skb(skb);
-				dev->stats.rx_packets++;
-				dev->stats.rx_bytes += pkt_len;
-
-				/* Update the mcast stats */
-				if (devcs & ETH_RX_MP)
-					dev->stats.multicast++;
-
-				lp->rx_skb[lp->rx_next_done] = skb_new;
-			}
-
-			rd->devcs = 0;
-
-			/* Restore descriptor's curr_addr */
-			if (skb_new)
-				rd->ca = CPHYSADDR(skb_new->data);
-			else
-				rd->ca = CPHYSADDR(skb->data);
-
-			rd->control = DMA_COUNT(KORINA_RBSIZE) |
-				DMA_DESC_COD | DMA_DESC_IOD;
-			lp->rd_ring[(lp->rx_next_done - 1) &
-				KORINA_RDS_MASK].control &=
-				~DMA_DESC_COD;
-
-			lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
-			dma_cache_wback((u32)rd, sizeof(*rd));
-			rd = &lp->rd_ring[lp->rx_next_done];
-			writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
+		} else if ((devcs & ETH_RX_ROK)) {
+			pkt_len = RCVPKT_LENGTH(devcs);
+
+			/* must be the (first and) last
+			 * descriptor then */
+			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
+
+			/* invalidate the cache */
+			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
+
+			/* Malloc up new buffer. */
+			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+
+			if (!skb_new)
+				break;
+			/* Do not count the CRC */
+			skb_put(skb, pkt_len - 4);
+			skb->protocol = eth_type_trans(skb, dev);
+
+			/* Pass the packet to upper layers */
+			netif_receive_skb(skb);
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
+
+			/* Update the mcast stats */
+			if (devcs & ETH_RX_MP)
+				dev->stats.multicast++;
+
+			/* 16 bit align */
+			skb_reserve(skb_new, 2);
+
+			lp->rx_skb[lp->rx_next_done] = skb_new;
 		}
+
+		rd->devcs = 0;
+
+		/* Restore descriptor's curr_addr */
+		if (skb_new)
+			rd->ca = CPHYSADDR(skb_new->data);
+		else
+			rd->ca = CPHYSADDR(skb->data);
+
+		rd->control = DMA_COUNT(KORINA_RBSIZE) |
+			DMA_DESC_COD | DMA_DESC_IOD;
+		lp->rd_ring[(lp->rx_next_done - 1) &
+			KORINA_RDS_MASK].control &=
+			~DMA_DESC_COD;
+
+		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
+		dma_cache_wback((u32)rd, sizeof(*rd));
+		rd = &lp->rd_ring[lp->rx_next_done];
+		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 	}
 
 	dmas = readl(&lp->rx_dma_regs->dmas);
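
The net effect of this hunk: the inner while loop, which walked descriptors on its own and ignored the NAPI budget, is gone; each iteration of the surrounding for loop now handles exactly one descriptor, so the limit argument is honored, and the descriptor-recycling tail (rd->devcs = 0 onwards) now runs for error frames as well. The buffer handling itself is the usual recycle-on-receive pattern; a minimal fragment with illustrative names, assuming the same kernel APIs the driver uses:

    /* hand the full buffer to the stack, re-arm the slot with a fresh one */
    struct sk_buff *full = ring_skb[idx];
    struct sk_buff *empty = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);

    if (empty) {
        skb_reserve(empty, 2);       /* 16-bit align the IP header */
        skb_put(full, pkt_len - 4);  /* length excludes the trailing CRC */
        netif_receive_skb(full);     /* the stack now owns "full" */
        ring_skb[idx] = empty;       /* descriptor gets the new buffer */
    }
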
442 444
443 dmas = readl(&lp->rx_dma_regs->dmas); 445 dmas = readl(&lp->rx_dma_regs->dmas);
@@ -623,12 +625,12 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
 	dmas = readl(&lp->tx_dma_regs->dmas);
 
 	if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
-		korina_tx(dev);
-
 		dmasm = readl(&lp->tx_dma_regs->dmasm);
 		writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
 				&lp->tx_dma_regs->dmasm);
 
+		korina_tx(dev);
+
 		if (lp->tx_chain_status == desc_filled &&
 			(readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
 			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
@@ -741,6 +743,7 @@ static struct ethtool_ops netdev_ethtool_ops = {
 static void korina_alloc_ring(struct net_device *dev)
 {
 	struct korina_private *lp = netdev_priv(dev);
+	struct sk_buff *skb;
 	int i;
 
 	/* Initialize the transmit descriptors */
@@ -756,8 +759,6 @@ static void korina_alloc_ring(struct net_device *dev)
 
 	/* Initialize the receive descriptors */
 	for (i = 0; i < KORINA_NUM_RDS; i++) {
-		struct sk_buff *skb = lp->rx_skb[i];
-
 		skb = dev_alloc_skb(KORINA_RBSIZE + 2);
 		if (!skb)
 			break;
@@ -770,11 +771,12 @@ static void korina_alloc_ring(struct net_device *dev)
 		lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
 	}
 
-	/* loop back */
-	lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[0]);
-	lp->rx_next_done = 0;
+	/* loop back receive descriptors, so the last
+	 * descriptor points to the first one */
+	lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
+	lp->rd_ring[i - 1].control |= DMA_DESC_COD;
 
-	lp->rd_ring[i].control |= DMA_DESC_COD;
+	lp->rx_next_done = 0;
 	lp->rx_chain_head = 0;
 	lp->rx_chain_tail = 0;
 	lp->rx_chain_status = desc_empty;
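
The switch to i - 1 fixes an off-by-one: when the allocation loop above runs to completion, i equals KORINA_NUM_RDS, so lp->rd_ring[i] is one element past the end of the ring and both the loopback link and the DMA_DESC_COD flag were written out of bounds; i - 1 is the last valid descriptor. A stand-alone illustration of the loop-exit index:

    #include <stdio.h>

    #define NUM_RDS 64

    int main(void)
    {
        int ring[NUM_RDS];
        int i;

        for (i = 0; i < NUM_RDS; i++)
            ring[i] = 0;
        /* here i == NUM_RDS: ring[i] would be out of bounds,
         * ring[i - 1] is the last valid element */
        printf("last valid index: %d\n", i - 1);
        return 0;
    }
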
@@ -901,6 +903,8 @@ static int korina_restart(struct net_device *dev)
 
 	korina_free_ring(dev);
 
+	napi_disable(&lp->napi);
+
 	ret = korina_init(dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: cannot restart device\n",
@@ -999,14 +1003,14 @@ static int korina_open(struct net_device *dev)
 	 * that handles the Done Finished
 	 * Ovr and Und Events */
 	ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
-			IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Rx", dev);
+			IRQF_DISABLED, "Korina ethernet Rx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get Rx DMA IRQ %d\n",
 			dev->name, lp->rx_irq);
 		goto err_release;
 	}
 	ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
-			IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Tx", dev);
+			IRQF_DISABLED, "Korina ethernet Tx", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get Tx DMA IRQ %d\n",
 			dev->name, lp->tx_irq);
@@ -1015,7 +1019,7 @@ static int korina_open(struct net_device *dev)
 
 	/* Install handler for overrun error. */
 	ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
-			IRQF_SHARED | IRQF_DISABLED, "Ethernet Overflow", dev);
+			IRQF_DISABLED, "Ethernet Overflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME"%s: unable to get OVR IRQ %d\n",
 			dev->name, lp->ovr_irq);
@@ -1024,7 +1028,7 @@ static int korina_open(struct net_device *dev)
 
 	/* Install handler for underflow error. */
 	ret = request_irq(lp->und_irq, &korina_und_interrupt,
-			IRQF_SHARED | IRQF_DISABLED, "Ethernet Underflow", dev);
+			IRQF_DISABLED, "Ethernet Underflow", dev);
 	if (ret < 0) {
 		printk(KERN_ERR DRV_NAME "%s: unable to get UND IRQ %d\n",
 			dev->name, lp->und_irq);
@@ -1067,6 +1071,8 @@ static int korina_close(struct net_device *dev)
 
 	korina_free_ring(dev);
 
+	napi_disable(&lp->napi);
+
 	free_irq(lp->rx_irq, dev);
 	free_irq(lp->tx_irq, dev);
 	free_irq(lp->ovr_irq, dev);
@@ -1089,7 +1095,6 @@ static int korina_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 	SET_NETDEV_DEV(dev, &pdev->dev);
-	platform_set_drvdata(pdev, dev);
 	lp = netdev_priv(dev);
 
 	bif->dev = dev;