path: root/drivers/net/sgiseeq.c
author     Thomas Bogendoerfer <tsbogend@alpha.franken.de>  2007-11-24 07:29:19 -0500
committer  David S. Miller <davem@davemloft.net>  2008-01-28 18:04:28 -0500
commit     43831b1581031991357385dd6c0762c06a3a62ab (patch)
tree       0b2dab44d0172d67cdbc869d392031324fc11318 /drivers/net/sgiseeq.c
parent     b0cd2f9016f75eb8a9fdc45d32f9b41fb16d48c9 (diff)
SGISEEQ: use cached memory access to make driver work on IP28
SGI IP28 machines would need special treatment (enabling additional wait states) when accessing memory uncached. To avoid this pain, I changed the driver to use only cached access to memory.

Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/sgiseeq.c')
-rw-r--r--  drivers/net/sgiseeq.c | 239
1 file changed, 166 insertions(+), 73 deletions(-)
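The patch replaces the old uncached (CKSEG1) descriptor accesses with ordinary cached memory plus explicit cache maintenance. The fragment below is a minimal sketch of that general pattern, not code from the patch: the names my_desc, my_ring_init and my_desc_read_ctrl are invented, while dma_alloc_noncoherent() and dma_cache_sync() are the generic kernel DMA calls the patch relies on.

/*
 * Sketch only -- descriptor memory comes from dma_alloc_noncoherent(),
 * so the CPU uses its normal (cached) mapping and every hand-over to or
 * from the device is made explicit with dma_cache_sync().
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct my_desc {
        u32 ctrl;       /* ownership/status bits written by the device */
        u32 pbuf;       /* bus address of the data buffer */
};

static struct my_desc *my_ring_init(struct device *dev, int nr,
                                    dma_addr_t *ring_dma)
{
        struct my_desc *ring;
        int i;

        /* Cached, non-coherent memory: fast CPU access, manual syncing. */
        ring = dma_alloc_noncoherent(dev, nr * sizeof(*ring), ring_dma,
                                     GFP_KERNEL);
        if (!ring)
                return NULL;

        for (i = 0; i < nr; i++) {
                ring[i].ctrl = 0;
                ring[i].pbuf = 0;
                /* Write the descriptor back before the device may read it. */
                dma_cache_sync(dev, &ring[i], sizeof(ring[i]), DMA_TO_DEVICE);
        }
        return ring;
}

static u32 my_desc_read_ctrl(struct device *dev, struct my_desc *d)
{
        /* Invalidate the cached copy before reading what the device wrote. */
        dma_cache_sync(dev, d, sizeof(*d), DMA_FROM_DEVICE);
        return d->ctrl;
}

The DMA_SYNC_DESC_CPU/DMA_SYNC_DESC_DEV macros added in the diff below are this same idea, specialized to the driver's descriptor size.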
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ff4056310356..3145ca13d255 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -12,7 +12,6 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/interrupt.h>
-#include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
@@ -53,14 +52,35 @@ static char *sgiseeqstr = "SGI Seeq8003";
                             sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
                             sp->tx_old - sp->tx_new - 1)
 
+#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +                                 \
+                                (dma_addr_t)((unsigned long)(v) -              \
+                                             (unsigned long)((sp)->rx_desc)))
+
+#define DMA_SYNC_DESC_CPU(dev, addr) \
+        do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
+             sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } while (0)
+
+#define DMA_SYNC_DESC_DEV(dev, addr) \
+        do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
+             sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } while (0)
+
+/* Copy frames shorter than rx_copybreak, otherwise pass on up in
+ * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
+ */
+static int rx_copybreak = 100;
+
+#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))
+
 struct sgiseeq_rx_desc {
         volatile struct hpc_dma_desc rdma;
-        volatile signed int buf_vaddr;
+        u8 padding[PAD_SIZE];
+        struct sk_buff *skb;
 };
 
 struct sgiseeq_tx_desc {
         volatile struct hpc_dma_desc tdma;
-        volatile signed int buf_vaddr;
+        u8 padding[PAD_SIZE];
+        struct sk_buff *skb;
 };
 
 /*
@@ -163,35 +183,55 @@ static int seeq_init_ring(struct net_device *dev)
 
         /* Setup tx ring. */
         for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
-                if (!sp->tx_desc[i].tdma.pbuf) {
-                        unsigned long buffer;
-
-                        buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
-                        if (!buffer)
-                                return -ENOMEM;
-                        sp->tx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
-                        sp->tx_desc[i].tdma.pbuf = CPHYSADDR(buffer);
-                }
                 sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
+                DMA_SYNC_DESC_DEV(dev, &sp->tx_desc[i]);
         }
 
         /* And now the rx ring. */
         for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                 if (!sp->rx_desc[i].rdma.pbuf) {
-                        unsigned long buffer;
+                        dma_addr_t dma_addr;
+                        struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
 
-                        buffer = (unsigned long) kmalloc(PKT_BUF_SZ, GFP_KERNEL);
-                        if (!buffer)
+                        if (skb == NULL)
                                 return -ENOMEM;
-                        sp->rx_desc[i].buf_vaddr = CKSEG1ADDR(buffer);
-                        sp->rx_desc[i].rdma.pbuf = CPHYSADDR(buffer);
+                        skb_reserve(skb, 2);
+                        dma_addr = dma_map_single(dev->dev.parent,
+                                                  skb->data - 2,
+                                                  PKT_BUF_SZ, DMA_FROM_DEVICE);
+                        sp->rx_desc[i].skb = skb;
+                        sp->rx_desc[i].rdma.pbuf = dma_addr;
                 }
                 sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
+                DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i]);
         }
         sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
+        DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i - 1]);
         return 0;
 }
 
+static void seeq_purge_ring(struct net_device *dev)
+{
+        struct sgiseeq_private *sp = netdev_priv(dev);
+        int i;
+
+        /* clear tx ring. */
+        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
+                if (sp->tx_desc[i].skb) {
+                        dev_kfree_skb(sp->tx_desc[i].skb);
+                        sp->tx_desc[i].skb = NULL;
+                }
+        }
+
+        /* And now the rx ring. */
+        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
+                if (sp->rx_desc[i].skb) {
+                        dev_kfree_skb(sp->rx_desc[i].skb);
+                        sp->rx_desc[i].skb = NULL;
+                }
+        }
+}
+
 #ifdef DEBUG
 static struct sgiseeq_private *gpriv;
 static struct net_device *gdev;
@@ -258,8 +298,8 @@ static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                 sregs->tstat = TSTAT_INIT_SEEQ;
         }
 
-        hregs->rx_ndptr = CPHYSADDR(sp->rx_desc);
-        hregs->tx_ndptr = CPHYSADDR(sp->tx_desc);
+        hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
+        hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);
 
         seeq_go(sp, hregs, sregs);
         return 0;
@@ -283,69 +323,90 @@ static inline void rx_maybe_restart(struct sgiseeq_private *sp,
                                     struct sgiseeq_regs *sregs)
 {
         if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
-                hregs->rx_ndptr = CPHYSADDR(sp->rx_desc + sp->rx_new);
+                hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
                 seeq_go(sp, hregs, sregs);
         }
 }
 
-#define for_each_rx(rd, sp) for((rd) = &(sp)->rx_desc[(sp)->rx_new]; \
-                                !((rd)->rdma.cntinfo & HPCDMA_OWN); \
-                                (rd) = &(sp)->rx_desc[(sp)->rx_new])
-
 static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
                               struct hpc3_ethregs *hregs,
                               struct sgiseeq_regs *sregs)
 {
         struct sgiseeq_rx_desc *rd;
         struct sk_buff *skb = NULL;
+        struct sk_buff *newskb;
         unsigned char pkt_status;
-        unsigned char *pkt_pointer = NULL;
         int len = 0;
         unsigned int orig_end = PREV_RX(sp->rx_new);
 
         /* Service every received packet. */
-        for_each_rx(rd, sp) {
+        rd = &sp->rx_desc[sp->rx_new];
+        DMA_SYNC_DESC_CPU(dev, rd);
+        while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
                 len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
-                pkt_pointer = (unsigned char *)(long)rd->buf_vaddr;
-                pkt_status = pkt_pointer[len + 2];
-
+                dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
+                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
+                pkt_status = rd->skb->data[len];
                 if (pkt_status & SEEQ_RSTAT_FIG) {
                         /* Packet is OK. */
-                        skb = dev_alloc_skb(len + 2);
-
-                        if (skb) {
-                                skb_reserve(skb, 2);
-                                skb_put(skb, len);
-
-                                /* Copy out of kseg1 to avoid silly cache flush. */
-                                skb_copy_to_linear_data(skb, pkt_pointer + 2, len);
-                                skb->protocol = eth_type_trans(skb, dev);
-
-                                /* We don't want to receive our own packets */
-                                if (memcmp(eth_hdr(skb)->h_source, dev->dev_addr, ETH_ALEN)) {
+                        /* We don't want to receive our own packets */
+                        if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
+                                if (len > rx_copybreak) {
+                                        skb = rd->skb;
+                                        newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
+                                        if (!newskb) {
+                                                newskb = skb;
+                                                skb = NULL;
+                                                goto memory_squeeze;
+                                        }
+                                        skb_reserve(newskb, 2);
+                                } else {
+                                        skb = netdev_alloc_skb(dev, len + 2);
+                                        if (skb) {
+                                                skb_reserve(skb, 2);
+                                                skb_copy_to_linear_data(skb, rd->skb->data, len);
+                                        }
+                                        newskb = rd->skb;
+                                }
+memory_squeeze:
+                                if (skb) {
+                                        skb_put(skb, len);
+                                        skb->protocol = eth_type_trans(skb, dev);
                                         netif_rx(skb);
                                         dev->last_rx = jiffies;
                                         dev->stats.rx_packets++;
                                         dev->stats.rx_bytes += len;
                                 } else {
-                                        /* Silently drop my own packets */
-                                        dev_kfree_skb_irq(skb);
+                                        printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
+                                               dev->name);
+                                        dev->stats.rx_dropped++;
                                 }
                         } else {
-                                printk (KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
-                                        dev->name);
-                                dev->stats.rx_dropped++;
+                                /* Silently drop my own packets */
+                                newskb = rd->skb;
                         }
                 } else {
                         record_rx_errors(dev, pkt_status);
+                        newskb = rd->skb;
                 }
+                rd->skb = newskb;
+                rd->rdma.pbuf = dma_map_single(dev->dev.parent,
+                                               newskb->data - 2,
+                                               PKT_BUF_SZ, DMA_FROM_DEVICE);
 
                 /* Return the entry to the ring pool. */
                 rd->rdma.cntinfo = RCNTINFO_INIT;
                 sp->rx_new = NEXT_RX(sp->rx_new);
+                DMA_SYNC_DESC_DEV(dev, rd);
+                rd = &sp->rx_desc[sp->rx_new];
+                DMA_SYNC_DESC_CPU(dev, rd);
         }
+        DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[orig_end]);
         sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
+        DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[orig_end]);
+        DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
         sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
+        DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
         rx_maybe_restart(sp, hregs, sregs);
 }
 
@@ -358,20 +419,29 @@ static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
         }
 }
 
-static inline void kick_tx(struct sgiseeq_tx_desc *td,
+static inline void kick_tx(struct net_device *dev,
+                           struct sgiseeq_private *sp,
                            struct hpc3_ethregs *hregs)
 {
+        struct sgiseeq_tx_desc *td;
+        int i = sp->tx_old;
+
         /* If the HPC aint doin nothin, and there are more packets
          * with ETXD cleared and XIU set we must make very certain
          * that we restart the HPC else we risk locking up the
          * adapter. The following code is only safe iff the HPCDMA
          * is not active!
          */
+        td = &sp->tx_desc[i];
+        DMA_SYNC_DESC_CPU(dev, td);
         while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
-               (HPCDMA_XIU | HPCDMA_ETXD))
-                td = (struct sgiseeq_tx_desc *)(long) CKSEG1ADDR(td->tdma.pnext);
+              (HPCDMA_XIU | HPCDMA_ETXD)) {
+                i = NEXT_TX(i);
+                td = &sp->tx_desc[i];
+                DMA_SYNC_DESC_CPU(dev, td);
+        }
         if (td->tdma.cntinfo & HPCDMA_XIU) {
-                hregs->tx_ndptr = CPHYSADDR(td);
+                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                 hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
         }
 }
@@ -400,11 +470,12 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
         for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
                 td = &sp->tx_desc[j];
 
+                DMA_SYNC_DESC_CPU(dev, td);
                 if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
                         break;
                 if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
                         if (!(status & HPC3_ETXCTRL_ACTIVE)) {
-                                hregs->tx_ndptr = CPHYSADDR(td);
+                                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                                 hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
                         }
                         break;
@@ -413,6 +484,11 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
                 sp->tx_old = NEXT_TX(sp->tx_old);
                 td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
                 td->tdma.cntinfo |= HPCDMA_EOX;
+                if (td->skb) {
+                        dev_kfree_skb_any(td->skb);
+                        td->skb = NULL;
+                }
+                DMA_SYNC_DESC_DEV(dev, td);
         }
 }
 
@@ -480,6 +556,7 @@ static int sgiseeq_close(struct net_device *dev)
         /* Shutdown the Seeq. */
         reset_hpc3_and_seeq(sp->hregs, sregs);
         free_irq(irq, dev);
+        seeq_purge_ring(dev);
 
         return 0;
 }
@@ -506,16 +583,22 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
         struct hpc3_ethregs *hregs = sp->hregs;
         unsigned long flags;
         struct sgiseeq_tx_desc *td;
-        int skblen, len, entry;
+        int len, entry;
 
         spin_lock_irqsave(&sp->tx_lock, flags);
 
         /* Setup... */
-        skblen = skb->len;
-        len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+        len = skb->len;
+        if (len < ETH_ZLEN) {
+                if (skb_padto(skb, ETH_ZLEN))
+                        return 0;
+                len = ETH_ZLEN;
+        }
+
         dev->stats.tx_bytes += len;
         entry = sp->tx_new;
         td = &sp->tx_desc[entry];
+        DMA_SYNC_DESC_CPU(dev, td);
 
         /* Create entry. There are so many races with adding a new
          * descriptor to the chain:
@@ -530,25 +613,27 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
          * entry and the HPC got to the end of the chain before we
          * added this new entry and restarted it.
          */
-        skb_copy_from_linear_data(skb, (char *)(long)td->buf_vaddr, skblen);
-        if (len != skblen)
-                memset((char *)(long)td->buf_vaddr + skb->len, 0, len-skblen);
+        td->skb = skb;
+        td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
+                                       len, DMA_TO_DEVICE);
         td->tdma.cntinfo = (len & HPCDMA_BCNT) |
                            HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
+        DMA_SYNC_DESC_DEV(dev, td);
         if (sp->tx_old != sp->tx_new) {
                 struct sgiseeq_tx_desc *backend;
 
                 backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
+                DMA_SYNC_DESC_CPU(dev, backend);
                 backend->tdma.cntinfo &= ~HPCDMA_EOX;
+                DMA_SYNC_DESC_DEV(dev, backend);
         }
         sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
 
         /* Maybe kick the HPC back into motion. */
         if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
-                kick_tx(&sp->tx_desc[sp->tx_old], hregs);
+                kick_tx(dev, sp, hregs);
 
         dev->trans_start = jiffies;
-        dev_kfree_skb(skb);
 
         if (!TX_BUFFS_AVAIL(sp))
                 netif_stop_queue(dev);
@@ -586,33 +671,41 @@ static void sgiseeq_set_multicast(struct net_device *dev)
         sgiseeq_reset(dev);
 }
 
-static inline void setup_tx_ring(struct sgiseeq_tx_desc *buf, int nbufs)
+static inline void setup_tx_ring(struct net_device *dev,
+                                 struct sgiseeq_tx_desc *buf,
+                                 int nbufs)
 {
+        struct sgiseeq_private *sp = netdev_priv(dev);
         int i = 0;
 
         while (i < (nbufs - 1)) {
-                buf[i].tdma.pnext = CPHYSADDR(buf + i + 1);
+                buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                 buf[i].tdma.pbuf = 0;
+                DMA_SYNC_DESC_DEV(dev, &buf[i]);
                 i++;
         }
-        buf[i].tdma.pnext = CPHYSADDR(buf);
+        buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
+        DMA_SYNC_DESC_DEV(dev, &buf[i]);
 }
 
-static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
+static inline void setup_rx_ring(struct net_device *dev,
+                                 struct sgiseeq_rx_desc *buf,
+                                 int nbufs)
 {
+        struct sgiseeq_private *sp = netdev_priv(dev);
         int i = 0;
 
         while (i < (nbufs - 1)) {
-                buf[i].rdma.pnext = CPHYSADDR(buf + i + 1);
+                buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                 buf[i].rdma.pbuf = 0;
+                DMA_SYNC_DESC_DEV(dev, &buf[i]);
                 i++;
         }
         buf[i].rdma.pbuf = 0;
-        buf[i].rdma.pnext = CPHYSADDR(buf);
+        buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
+        DMA_SYNC_DESC_DEV(dev, &buf[i]);
 }
 
-#define ALIGNED(x) ((((unsigned long)(x)) + 0xf) & ~(0xf))
-
 static int __init sgiseeq_probe(struct platform_device *pdev)
 {
         struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
@@ -621,7 +714,7 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
         unsigned int irq = pd->irq;
         struct sgiseeq_private *sp;
         struct net_device *dev;
-        int err, i;
+        int err;
         DECLARE_MAC_BUF(mac);
 
         dev = alloc_etherdev(sizeof (struct sgiseeq_private));
@@ -635,7 +728,7 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
         sp = netdev_priv(dev);
 
         /* Make private data page aligned */
-        sr = dma_alloc_coherent(&pdev->dev, sizeof(*sp->srings),
+        sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
                                 &sp->srings_dma, GFP_KERNEL);
         if (!sr) {
                 printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
@@ -647,8 +740,8 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
         sp->tx_desc = sp->srings->txvector;
 
         /* A couple calculations now, saves many cycles later. */
-        setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
-        setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);
+        setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
+        setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);
 
         memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
 
@@ -716,8 +809,8 @@ static int __exit sgiseeq_remove(struct platform_device *pdev)
         struct sgiseeq_private *sp = netdev_priv(dev);
 
         unregister_netdev(dev);
-        dma_free_coherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
-                          sp->srings_dma);
+        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
+                             sp->srings_dma);
         free_netdev(dev);
         platform_set_drvdata(pdev, NULL);
 
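A note on the receive-path design introduced above: frames shorter than rx_copybreak are copied into a small, freshly allocated skb so the full-sized ring buffer can be reused in place, while longer frames are handed up directly and the ring slot is refilled with a new buffer. Below is a condensed, hypothetical sketch of that decision only; MY_PKT_BUF_SZ, my_rx_copybreak and my_rx_one() are made-up names, and the DMA map/unmap of the ring buffer is omitted for brevity.

/* Sketch only -- not code from the driver. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_PKT_BUF_SZ   1584
static int my_rx_copybreak = 100;

/*
 * *bufp is the full-sized buffer currently sitting in the RX ring slot.
 * Returns the skb to pass to netif_rx(), or NULL if the frame must be
 * dropped; on return *bufp always holds a buffer for the ring slot.
 */
static struct sk_buff *my_rx_one(struct net_device *dev,
                                 struct sk_buff **bufp, int len)
{
        struct sk_buff *skb, *newskb;

        if (len > my_rx_copybreak) {
                /* Hand the filled buffer up and refill the ring slot. */
                skb = *bufp;
                newskb = netdev_alloc_skb(dev, MY_PKT_BUF_SZ);
                if (!newskb)
                        return NULL;    /* memory squeeze: keep old buffer */
                skb_reserve(newskb, 2);
        } else {
                /* Copy the short frame; the ring buffer is reused as is. */
                skb = netdev_alloc_skb(dev, len + 2);
                if (skb) {
                        skb_reserve(skb, 2);
                        skb_copy_to_linear_data(skb, (*bufp)->data, len);
                }
                newskb = *bufp;
        }
        *bufp = newskb;
        if (skb)
                skb_put(skb, len);
        return skb;
}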