author	Thomas Bogendoerfer <tsbogend@alpha.franken.de>	2007-12-19 07:42:36 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:07:56 -0500
commit	49b11bc3d43eb287fc9d78e1a892e97288980d49 (patch)
tree	ce7605b5e8dc2edbea41336d479c537d52de8959	/drivers/net/sgiseeq.c
parent	db17f39564539e71c9b3a63e7ed5313fe311d266 (diff)
SGISEEQ: use cached memory access to make driver work on IP28
- Use inline functions for dma_sync_* instead of macros
- Added Kconfig change to make selection for similar SGI boxes easier

Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
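As the subject line says, the descriptors are now accessed through cached memory on IP28, so every CPU/device handoff has to be bracketed by explicit cache maintenance via the new helpers. A minimal sketch of that pattern, trimmed from the receive path in the hunks below; all identifiers come from the patch itself, only the explanatory comments are added here:

	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);	/* make the HPC3's descriptor writes visible to the CPU */
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		/* ... process the received frame ... */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);	/* write the updated descriptor back for the DMA engine */
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}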
Diffstat (limited to 'drivers/net/sgiseeq.c')
-rw-r--r--	drivers/net/sgiseeq.c	64
1 file changed, 34 insertions(+), 30 deletions(-)
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 3145ca13d255..c69bb8ba8b64 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -56,14 +56,6 @@ static char *sgiseeqstr = "SGI Seeq8003";
 		     (dma_addr_t)((unsigned long)(v) - \
 				  (unsigned long)((sp)->rx_desc)))
 
-#define DMA_SYNC_DESC_CPU(dev, addr) \
-	do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-	     sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } while (0)
-
-#define DMA_SYNC_DESC_DEV(dev, addr) \
-	do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-	     sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } while (0)
-
 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
  * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
  */
@@ -116,6 +108,18 @@ struct sgiseeq_private {
 	spinlock_t		tx_lock;
 };
 
+static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
+{
+	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+		       DMA_FROM_DEVICE);
+}
+
+static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
+{
+	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+		       DMA_TO_DEVICE);
+}
+
 static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
 {
 	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
@@ -184,7 +188,7 @@ static int seeq_init_ring(struct net_device *dev)
 	/* Setup tx ring. */
 	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
 		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
-		DMA_SYNC_DESC_DEV(dev, &sp->tx_desc[i]);
+		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
 	}
 
 	/* And now the rx ring. */
@@ -203,10 +207,10 @@ static int seeq_init_ring(struct net_device *dev)
 			sp->rx_desc[i].rdma.pbuf = dma_addr;
 		}
 		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
-		DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i]);
+		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
 	}
 	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i - 1]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
 	return 0;
 }
 
@@ -341,7 +345,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 
 	/* Service every received packet. */
 	rd = &sp->rx_desc[sp->rx_new];
-	DMA_SYNC_DESC_CPU(dev, rd);
+	dma_sync_desc_cpu(dev, rd);
 	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
 		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
 		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
@@ -397,16 +401,16 @@ memory_squeeze:
 		/* Return the entry to the ring pool. */
 		rd->rdma.cntinfo = RCNTINFO_INIT;
 		sp->rx_new = NEXT_RX(sp->rx_new);
-		DMA_SYNC_DESC_DEV(dev, rd);
+		dma_sync_desc_dev(dev, rd);
 		rd = &sp->rx_desc[sp->rx_new];
-		DMA_SYNC_DESC_CPU(dev, rd);
+		dma_sync_desc_cpu(dev, rd);
 	}
-	DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[orig_end]);
+	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
 	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[orig_end]);
-	DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
+	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
 	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
 	rx_maybe_restart(sp, hregs, sregs);
 }
 
@@ -433,12 +437,12 @@ static inline void kick_tx(struct net_device *dev,
 	 * is not active!
 	 */
 	td = &sp->tx_desc[i];
-	DMA_SYNC_DESC_CPU(dev, td);
+	dma_sync_desc_cpu(dev, td);
 	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
 	      (HPCDMA_XIU | HPCDMA_ETXD)) {
 		i = NEXT_TX(i);
 		td = &sp->tx_desc[i];
-		DMA_SYNC_DESC_CPU(dev, td);
+		dma_sync_desc_cpu(dev, td);
 	}
 	if (td->tdma.cntinfo & HPCDMA_XIU) {
 		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
@@ -470,7 +474,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
 	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
 		td = &sp->tx_desc[j];
 
-		DMA_SYNC_DESC_CPU(dev, td);
+		dma_sync_desc_cpu(dev, td);
 		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
 			break;
 		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
@@ -488,7 +492,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
 			dev_kfree_skb_any(td->skb);
 			td->skb = NULL;
 		}
-		DMA_SYNC_DESC_DEV(dev, td);
+		dma_sync_desc_dev(dev, td);
 	}
 }
 
@@ -598,7 +602,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->stats.tx_bytes += len;
 	entry = sp->tx_new;
 	td = &sp->tx_desc[entry];
-	DMA_SYNC_DESC_CPU(dev, td);
+	dma_sync_desc_cpu(dev, td);
 
 	/* Create entry. There are so many races with adding a new
 	 * descriptor to the chain:
@@ -618,14 +622,14 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				       len, DMA_TO_DEVICE);
 	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
 			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
-	DMA_SYNC_DESC_DEV(dev, td);
+	dma_sync_desc_dev(dev, td);
 	if (sp->tx_old != sp->tx_new) {
 		struct sgiseeq_tx_desc *backend;
 
 		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
-		DMA_SYNC_DESC_CPU(dev, backend);
+		dma_sync_desc_cpu(dev, backend);
 		backend->tdma.cntinfo &= ~HPCDMA_EOX;
-		DMA_SYNC_DESC_DEV(dev, backend);
+		dma_sync_desc_dev(dev, backend);
 	}
 	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
 
@@ -681,11 +685,11 @@ static inline void setup_tx_ring(struct net_device *dev,
 	while (i < (nbufs - 1)) {
 		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
 		buf[i].tdma.pbuf = 0;
-		DMA_SYNC_DESC_DEV(dev, &buf[i]);
+		dma_sync_desc_dev(dev, &buf[i]);
 		i++;
 	}
 	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
-	DMA_SYNC_DESC_DEV(dev, &buf[i]);
+	dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static inline void setup_rx_ring(struct net_device *dev,
@@ -698,12 +702,12 @@ static inline void setup_rx_ring(struct net_device *dev,
 	while (i < (nbufs - 1)) {
 		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
 		buf[i].rdma.pbuf = 0;
-		DMA_SYNC_DESC_DEV(dev, &buf[i]);
+		dma_sync_desc_dev(dev, &buf[i]);
 		i++;
 	}
 	buf[i].rdma.pbuf = 0;
 	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
-	DMA_SYNC_DESC_DEV(dev, &buf[i]);
+	dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static int __init sgiseeq_probe(struct platform_device *pdev)