author     Arnd Bergmann <arnd@arndb.de>      2006-01-12 17:16:44 -0500
committer  Jeff Garzik <jgarzik@pobox.com>    2006-01-17 07:25:01 -0500
commit     11f1a52b87eaf830bd03d4e01d563437c30f7728 (patch)
tree       f251162b093f1ac7153d17de8d046f16f3e2e17e /drivers/net/spider_net.c
parent     8e0a613bf61ccaab376877d7c2ed50315b8ca6d7 (diff)
[PATCH] spidernet: performance optimizations
Performance optimizations, changes in these areas:
- RX and TX checksum offload
- correct maximum MTU
- don't use TX interrupts anymore, use a timer instead
- remove some superfluous barriers
- improve RX RAM full handling

From: Utz Bacher <utz.bacher@de.ibm.com>
Signed-off-by: Jens Osterkamp <jens.osterkamp@de.ibm.com>
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/net/spider_net.c')
-rw-r--r--  drivers/net/spider_net.c  497
1 files changed, 248 insertions, 249 deletions
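
The commit message notes that TX-completion interrupts are replaced by a timer that periodically reclaims finished TX descriptors (see spider_net_cleanup_tx_ring and the tx_timer setup in the diff below). What follows is a minimal, hypothetical sketch of that general pattern using the timer API of that kernel generation; all example_* names are invented for illustration and are not the driver's actual symbols.

/*
 * Illustrative sketch only -- not the driver's code. Reclaim completed TX
 * descriptors from a periodic timer instead of a TX-completion interrupt,
 * and re-arm the timer only while unfinished descriptors remain.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

#define EXAMPLE_TX_TIMER_INTERVAL (HZ / 100)	/* assumed poll interval */

struct example_card {
	struct timer_list tx_timer;
	/* descriptor rings, netdev pointer, locks, ... */
};

/* hypothetical helper: returns nonzero while busy descriptors remain */
int example_release_tx_chain(struct example_card *card);

static void example_tx_timer_fn(unsigned long data)
{
	struct example_card *card = (struct example_card *) data;

	/* free descriptors the hardware has finished with */
	if (example_release_tx_chain(card))
		mod_timer(&card->tx_timer,
			  jiffies + EXAMPLE_TX_TIMER_INTERVAL);
}

static void example_setup_tx_timer(struct example_card *card)
{
	init_timer(&card->tx_timer);
	card->tx_timer.function = example_tx_timer_fn;
	card->tx_timer.data = (unsigned long) card;
}

The trade-off, visible in the patch itself, is that the transmit path and the timer may now run concurrently, which is why the diff guards spider_net_release_tx_chain with an atomic counter instead of relying on TX-interrupt serialization.
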
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 86969191c3f7..e2ad9aef0109 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/config.h> 24#include <linux/config.h>
25
26#include <linux/compiler.h> 25#include <linux/compiler.h>
27#include <linux/crc32.h> 26#include <linux/crc32.h>
28#include <linux/delay.h> 27#include <linux/delay.h>
@@ -43,6 +42,7 @@
43#include <linux/slab.h> 42#include <linux/slab.h>
44#include <linux/tcp.h> 43#include <linux/tcp.h>
45#include <linux/types.h> 44#include <linux/types.h>
45#include <linux/vmalloc.h>
46#include <linux/wait.h> 46#include <linux/wait.h>
47#include <linux/workqueue.h> 47#include <linux/workqueue.h>
48#include <asm/bitops.h> 48#include <asm/bitops.h>
@@ -108,42 +108,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
108 writel(value, card->regs + reg); 108 writel(value, card->regs + reg);
109} 109}
110 110
111/**
112 * spider_net_write_reg_sync - writes to an SMMIO register of a card
113 * @card: device structure
114 * @reg: register to write to
115 * @value: value to write into the specified SMMIO register
116 *
117 * Unlike spider_net_write_reg, this will also make sure the
118 * data arrives on the card by reading the reg again.
119 */
120static void
121spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
122{
123 value = cpu_to_le32(value);
124 writel(value, card->regs + reg);
125 (void)readl(card->regs + reg);
126}
127
128/**
129 * spider_net_rx_irq_off - switch off rx irq on this spider card
130 * @card: device structure
131 *
132 * switches off rx irq by masking them out in the GHIINTnMSK register
133 */
134static void
135spider_net_rx_irq_off(struct spider_net_card *card)
136{
137 u32 regvalue;
138 unsigned long flags;
139
140 spin_lock_irqsave(&card->intmask_lock, flags);
141 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
142 regvalue &= ~SPIDER_NET_RXINT;
143 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
144 spin_unlock_irqrestore(&card->intmask_lock, flags);
145}
146
147/** spider_net_write_phy - write to phy register 111/** spider_net_write_phy - write to phy register
148 * @netdev: adapter to be written to 112 * @netdev: adapter to be written to
149 * @mii_id: id of MII 113 * @mii_id: id of MII
@@ -199,60 +163,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
199} 163}
200 164
201/** 165/**
202 * spider_net_rx_irq_on - switch on rx irq on this spider card 166 * spider_net_rx_irq_off - switch off rx irq on this spider card
203 * @card: device structure
204 *
205 * switches on rx irq by enabling them in the GHIINTnMSK register
206 */
207static void
208spider_net_rx_irq_on(struct spider_net_card *card)
209{
210 u32 regvalue;
211 unsigned long flags;
212
213 spin_lock_irqsave(&card->intmask_lock, flags);
214 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
215 regvalue |= SPIDER_NET_RXINT;
216 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
217 spin_unlock_irqrestore(&card->intmask_lock, flags);
218}
219
220/**
221 * spider_net_tx_irq_off - switch off tx irq on this spider card
222 * @card: device structure 167 * @card: device structure
223 * 168 *
224 * switches off tx irq by masking them out in the GHIINTnMSK register 169 * switches off rx irq by masking them out in the GHIINTnMSK register
225 */ 170 */
226static void 171static void
227spider_net_tx_irq_off(struct spider_net_card *card) 172spider_net_rx_irq_off(struct spider_net_card *card)
228{ 173{
229 u32 regvalue; 174 u32 regvalue;
230 unsigned long flags;
231 175
232 spin_lock_irqsave(&card->intmask_lock, flags); 176 regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
233 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); 177 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
234 regvalue &= ~SPIDER_NET_TXINT;
235 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
236 spin_unlock_irqrestore(&card->intmask_lock, flags);
237} 178}
238 179
239/** 180/**
240 * spider_net_tx_irq_on - switch on tx irq on this spider card 181 * spider_net_rx_irq_on - switch on rx irq on this spider card
241 * @card: device structure 182 * @card: device structure
242 * 183 *
243 * switches on tx irq by enabling them in the GHIINTnMSK register 184 * switches on rx irq by enabling them in the GHIINTnMSK register
244 */ 185 */
245static void 186static void
246spider_net_tx_irq_on(struct spider_net_card *card) 187spider_net_rx_irq_on(struct spider_net_card *card)
247{ 188{
248 u32 regvalue; 189 u32 regvalue;
249 unsigned long flags;
250 190
251 spin_lock_irqsave(&card->intmask_lock, flags); 191 regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
252 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); 192 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
253 regvalue |= SPIDER_NET_TXINT;
254 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
255 spin_unlock_irqrestore(&card->intmask_lock, flags);
256} 193}
257 194
258/** 195/**
@@ -326,9 +263,8 @@ static enum spider_net_descr_status
326spider_net_get_descr_status(struct spider_net_descr *descr) 263spider_net_get_descr_status(struct spider_net_descr *descr)
327{ 264{
328 u32 cmd_status; 265 u32 cmd_status;
329 rmb(); 266
330 cmd_status = descr->dmac_cmd_status; 267 cmd_status = descr->dmac_cmd_status;
331 rmb();
332 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; 268 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
333 /* no need to mask out any bits, as cmd_status is 32 bits wide only 269 /* no need to mask out any bits, as cmd_status is 32 bits wide only
334 * (and unsigned) */ 270 * (and unsigned) */
@@ -349,7 +285,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
349{ 285{
350 u32 cmd_status; 286 u32 cmd_status;
351 /* read the status */ 287 /* read the status */
352 mb();
353 cmd_status = descr->dmac_cmd_status; 288 cmd_status = descr->dmac_cmd_status;
354 /* clean the upper 4 bits */ 289 /* clean the upper 4 bits */
355 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; 290 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
@@ -357,7 +292,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
357 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; 292 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
358 /* and write it back */ 293 /* and write it back */
359 descr->dmac_cmd_status = cmd_status; 294 descr->dmac_cmd_status = cmd_status;
360 wmb();
361} 295}
362 296
363/** 297/**
@@ -398,8 +332,9 @@ spider_net_init_chain(struct spider_net_card *card,
398{ 332{
399 int i; 333 int i;
400 struct spider_net_descr *descr; 334 struct spider_net_descr *descr;
335 dma_addr_t buf;
401 336
402 spin_lock_init(&card->chain_lock); 337 atomic_set(&card->rx_chain_refill,0);
403 338
404 descr = start_descr; 339 descr = start_descr;
405 memset(descr, 0, sizeof(*descr) * no); 340 memset(descr, 0, sizeof(*descr) * no);
@@ -408,14 +343,14 @@ spider_net_init_chain(struct spider_net_card *card,
408 for (i=0; i<no; i++, descr++) { 343 for (i=0; i<no; i++, descr++) {
409 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 344 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
410 345
411 descr->bus_addr = 346 buf = pci_map_single(card->pdev, descr,
412 pci_map_single(card->pdev, descr, 347 SPIDER_NET_DESCR_SIZE,
413 SPIDER_NET_DESCR_SIZE, 348 PCI_DMA_BIDIRECTIONAL);
414 PCI_DMA_BIDIRECTIONAL);
415 349
416 if (descr->bus_addr == DMA_ERROR_CODE) 350 if (buf == DMA_ERROR_CODE)
417 goto iommu_error; 351 goto iommu_error;
418 352
353 descr->bus_addr = buf;
419 descr->next = descr + 1; 354 descr->next = descr + 1;
420 descr->prev = descr - 1; 355 descr->prev = descr - 1;
421 356
@@ -439,7 +374,8 @@ iommu_error:
439 for (i=0; i < no; i++, descr++) 374 for (i=0; i < no; i++, descr++)
440 if (descr->bus_addr) 375 if (descr->bus_addr)
441 pci_unmap_single(card->pdev, descr->bus_addr, 376 pci_unmap_single(card->pdev, descr->bus_addr,
442 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL); 377 SPIDER_NET_DESCR_SIZE,
378 PCI_DMA_BIDIRECTIONAL);
443 return -ENOMEM; 379 return -ENOMEM;
444} 380}
445 381
@@ -459,7 +395,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
459 if (descr->skb) { 395 if (descr->skb) {
460 dev_kfree_skb(descr->skb); 396 dev_kfree_skb(descr->skb);
461 pci_unmap_single(card->pdev, descr->buf_addr, 397 pci_unmap_single(card->pdev, descr->buf_addr,
462 SPIDER_NET_MAX_MTU, 398 SPIDER_NET_MAX_FRAME,
463 PCI_DMA_BIDIRECTIONAL); 399 PCI_DMA_BIDIRECTIONAL);
464 } 400 }
465 descr = descr->next; 401 descr = descr->next;
@@ -486,7 +422,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
486 int bufsize; 422 int bufsize;
487 423
488 /* we need to round up the buffer size to a multiple of 128 */ 424 /* we need to round up the buffer size to a multiple of 128 */
489 bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) & 425 bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
490 (~(SPIDER_NET_RXBUF_ALIGN - 1)); 426 (~(SPIDER_NET_RXBUF_ALIGN - 1));
491 427
492 /* and we need to have it 128 byte aligned, therefore we allocate a 428 /* and we need to have it 128 byte aligned, therefore we allocate a
@@ -494,10 +430,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
494 /* allocate an skb */ 430 /* allocate an skb */
495 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1); 431 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
496 if (!descr->skb) { 432 if (!descr->skb) {
497 if (net_ratelimit()) 433 if (netif_msg_rx_err(card) && net_ratelimit())
498 if (netif_msg_rx_err(card)) 434 pr_err("Not enough memory to allocate rx buffer\n");
499 pr_err("Not enough memory to allocate "
500 "rx buffer\n");
501 return -ENOMEM; 435 return -ENOMEM;
502 } 436 }
503 descr->buf_size = bufsize; 437 descr->buf_size = bufsize;
@@ -512,12 +446,11 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
512 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); 446 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
513 /* io-mmu-map the skb */ 447 /* io-mmu-map the skb */
514 buf = pci_map_single(card->pdev, descr->skb->data, 448 buf = pci_map_single(card->pdev, descr->skb->data,
515 SPIDER_NET_MAX_MTU, 449 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
516 PCI_DMA_BIDIRECTIONAL);
517 descr->buf_addr = buf; 450 descr->buf_addr = buf;
518 if (buf == DMA_ERROR_CODE) { 451 if (buf == DMA_ERROR_CODE) {
519 dev_kfree_skb_any(descr->skb); 452 dev_kfree_skb_any(descr->skb);
520 if (netif_msg_rx_err(card)) 453 if (netif_msg_rx_err(card) && net_ratelimit())
521 pr_err("Could not iommu-map rx buffer\n"); 454 pr_err("Could not iommu-map rx buffer\n");
522 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 455 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
523 } else { 456 } else {
@@ -528,10 +461,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
528} 461}
529 462
530/** 463/**
531 * spider_net_enable_rxctails - sets RX dmac chain tail addresses 464 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
532 * @card: card structure 465 * @card: card structure
533 * 466 *
534 * spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the 467 * spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the
535 * chip by writing to the appropriate register. DMA is enabled in 468 * chip by writing to the appropriate register. DMA is enabled in
536 * spider_net_enable_rxdmac. 469 * spider_net_enable_rxdmac.
537 */ 470 */
@@ -553,6 +486,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
553static void 486static void
554spider_net_enable_rxdmac(struct spider_net_card *card) 487spider_net_enable_rxdmac(struct spider_net_card *card)
555{ 488{
489 wmb();
556 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, 490 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
557 SPIDER_NET_DMA_RX_VALUE); 491 SPIDER_NET_DMA_RX_VALUE);
558} 492}
@@ -561,32 +495,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
561 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains 495 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
562 * @card: card structure 496 * @card: card structure
563 * 497 *
564 * refills descriptors in all chains (last used chain first): allocates skbs 498 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
565 * and iommu-maps them.
566 */ 499 */
567static void 500static void
568spider_net_refill_rx_chain(struct spider_net_card *card) 501spider_net_refill_rx_chain(struct spider_net_card *card)
569{ 502{
570 struct spider_net_descr_chain *chain; 503 struct spider_net_descr_chain *chain;
571 int count = 0;
572 unsigned long flags;
573 504
574 chain = &card->rx_chain; 505 chain = &card->rx_chain;
575 506
576 spin_lock_irqsave(&card->chain_lock, flags); 507 /* one context doing the refill (and a second context seeing that
577 while (spider_net_get_descr_status(chain->head) == 508 * and omitting it) is ok. If called by NAPI, we'll be called again
578 SPIDER_NET_DESCR_NOT_IN_USE) { 509 * as spider_net_decode_one_descr is called several times. If some
579 if (spider_net_prepare_rx_descr(card, chain->head)) 510 * interrupt calls us, the NAPI is about to clean up anyway. */
580 break; 511 if (atomic_inc_return(&card->rx_chain_refill) == 1)
581 count++; 512 while (spider_net_get_descr_status(chain->head) ==
582 chain->head = chain->head->next; 513 SPIDER_NET_DESCR_NOT_IN_USE) {
583 } 514 if (spider_net_prepare_rx_descr(card, chain->head))
584 spin_unlock_irqrestore(&card->chain_lock, flags); 515 break;
516 chain->head = chain->head->next;
517 }
585 518
586 /* could be optimized, only do that, if we know the DMA processing 519 atomic_dec(&card->rx_chain_refill);
587 * has terminated */
588 if (count)
589 spider_net_enable_rxdmac(card);
590} 520}
591 521
592/** 522/**
@@ -615,6 +545,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
615 /* this will allocate the rest of the rx buffers; if not, it's 545 /* this will allocate the rest of the rx buffers; if not, it's
616 * business as usual later on */ 546 * business as usual later on */
617 spider_net_refill_rx_chain(card); 547 spider_net_refill_rx_chain(card);
548 spider_net_enable_rxdmac(card);
618 return 0; 549 return 0;
619 550
620error: 551error:
@@ -651,24 +582,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
651 * @card: adapter structure 582 * @card: adapter structure
652 * @brutal: if set, don't care about whether descriptor seems to be in use 583 * @brutal: if set, don't care about whether descriptor seems to be in use
653 * 584 *
654 * releases the tx descriptors that spider has finished with (if non-brutal) 585 * returns 0 if the tx ring is empty, otherwise 1.
655 * or simply release tx descriptors (if brutal) 586 *
587 * spider_net_release_tx_chain releases the tx descriptors that spider has
588 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
589 * If some other context is calling this function, we return 1 so that we're
590 * scheduled again (if we were scheduled) and will not loose initiative.
656 */ 591 */
657static void 592static int
658spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 593spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
659{ 594{
660 struct spider_net_descr_chain *tx_chain = &card->tx_chain; 595 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
661 enum spider_net_descr_status status; 596 enum spider_net_descr_status status;
662 597
663 spider_net_tx_irq_off(card); 598 if (atomic_inc_return(&card->tx_chain_release) != 1) {
599 atomic_dec(&card->tx_chain_release);
600 return 1;
601 }
664 602
665 /* no lock for chain needed, if this is only executed once at a time */
666again:
667 for (;;) { 603 for (;;) {
668 status = spider_net_get_descr_status(tx_chain->tail); 604 status = spider_net_get_descr_status(tx_chain->tail);
669 switch (status) { 605 switch (status) {
670 case SPIDER_NET_DESCR_CARDOWNED: 606 case SPIDER_NET_DESCR_CARDOWNED:
671 if (!brutal) goto out; 607 if (!brutal)
608 goto out;
672 /* fallthrough, if we release the descriptors 609 /* fallthrough, if we release the descriptors
673 * brutally (then we don't care about 610 * brutally (then we don't care about
674 * SPIDER_NET_DESCR_CARDOWNED) */ 611 * SPIDER_NET_DESCR_CARDOWNED) */
@@ -695,25 +632,30 @@ again:
695 tx_chain->tail = tx_chain->tail->next; 632 tx_chain->tail = tx_chain->tail->next;
696 } 633 }
697out: 634out:
635 atomic_dec(&card->tx_chain_release);
636
698 netif_wake_queue(card->netdev); 637 netif_wake_queue(card->netdev);
699 638
700 if (!brutal) { 639 if (status == SPIDER_NET_DESCR_CARDOWNED)
701 /* switch on tx irqs (while we are still in the interrupt 640 return 1;
702 * handler, so we don't get an interrupt), check again 641 return 0;
703 * for done descriptors. This results in fewer interrupts */ 642}
704 spider_net_tx_irq_on(card);
705 status = spider_net_get_descr_status(tx_chain->tail);
706 switch (status) {
707 case SPIDER_NET_DESCR_RESPONSE_ERROR:
708 case SPIDER_NET_DESCR_PROTECTION_ERROR:
709 case SPIDER_NET_DESCR_FORCE_END:
710 case SPIDER_NET_DESCR_COMPLETE:
711 goto again;
712 default:
713 break;
714 }
715 }
716 643
644/**
645 * spider_net_cleanup_tx_ring - cleans up the TX ring
646 * @card: card structure
647 *
648 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
649 * interrupts to cleanup our TX ring) and returns sent packets to the stack
650 * by freeing them
651 */
652static void
653spider_net_cleanup_tx_ring(struct spider_net_card *card)
654{
655 if ( (spider_net_release_tx_chain(card, 0)) &&
656 (card->netdev->flags & IFF_UP) ) {
657 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
658 }
717} 659}
718 660
719/** 661/**
@@ -728,16 +670,22 @@ out:
728static u8 670static u8
729spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) 671spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
730{ 672{
731 /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
732 * ff:ff:ff:ff:ff:ff must result in 0xfd */
733 u32 crc; 673 u32 crc;
734 u8 hash; 674 u8 hash;
675 char addr_for_crc[ETH_ALEN] = { 0, };
676 int i, bit;
677
678 for (i = 0; i < ETH_ALEN * 8; i++) {
679 bit = (addr[i / 8] >> (i % 8)) & 1;
680 addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
681 }
735 682
736 crc = crc32_be(~0, addr, netdev->addr_len); 683 crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
737 684
738 hash = (crc >> 27); 685 hash = (crc >> 27);
739 hash <<= 3; 686 hash <<= 3;
740 hash |= crc & 7; 687 hash |= crc & 7;
688 hash &= 0xff;
741 689
742 return hash; 690 return hash;
743} 691}
@@ -823,9 +771,11 @@ spider_net_stop(struct net_device *netdev)
823{ 771{
824 struct spider_net_card *card = netdev_priv(netdev); 772 struct spider_net_card *card = netdev_priv(netdev);
825 773
774 tasklet_kill(&card->rxram_full_tl);
826 netif_poll_disable(netdev); 775 netif_poll_disable(netdev);
827 netif_carrier_off(netdev); 776 netif_carrier_off(netdev);
828 netif_stop_queue(netdev); 777 netif_stop_queue(netdev);
778 del_timer_sync(&card->tx_timer);
829 779
830 /* disable/mask all interrupts */ 780 /* disable/mask all interrupts */
831 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); 781 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
@@ -874,13 +824,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
874 * @skb: packet to consider 824 * @skb: packet to consider
875 * 825 *
876 * fills out the command and status field of the descriptor structure, 826 * fills out the command and status field of the descriptor structure,
877 * depending on hardware checksum settings. This function assumes a wmb() 827 * depending on hardware checksum settings.
878 * has executed before.
879 */ 828 */
880static void 829static void
881spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, 830spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
882 struct sk_buff *skb) 831 struct sk_buff *skb)
883{ 832{
833 /* make sure the other fields in the descriptor are written */
834 wmb();
835
884 if (skb->ip_summed != CHECKSUM_HW) { 836 if (skb->ip_summed != CHECKSUM_HW) {
885 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 837 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
886 return; 838 return;
@@ -889,14 +841,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
889 /* is packet ip? 841 /* is packet ip?
890 * if yes: tcp? udp? */ 842 * if yes: tcp? udp? */
891 if (skb->protocol == htons(ETH_P_IP)) { 843 if (skb->protocol == htons(ETH_P_IP)) {
892 if (skb->nh.iph->protocol == IPPROTO_TCP) { 844 if (skb->nh.iph->protocol == IPPROTO_TCP)
893 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; 845 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
894 } else if (skb->nh.iph->protocol == IPPROTO_UDP) { 846 else if (skb->nh.iph->protocol == IPPROTO_UDP)
895 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; 847 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
896 } else { /* the stack should checksum non-tcp and non-udp 848 else /* the stack should checksum non-tcp and non-udp
897 packets on his own: NETIF_F_IP_CSUM */ 849 packets on his own: NETIF_F_IP_CSUM */
898 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; 850 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
899 }
900 } 851 }
901} 852}
902 853
@@ -916,10 +867,12 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
916 struct spider_net_descr *descr, 867 struct spider_net_descr *descr,
917 struct sk_buff *skb) 868 struct sk_buff *skb)
918{ 869{
919 dma_addr_t buf = pci_map_single(card->pdev, skb->data, 870 dma_addr_t buf;
920 skb->len, PCI_DMA_BIDIRECTIONAL); 871
872 buf = pci_map_single(card->pdev, skb->data,
873 skb->len, PCI_DMA_BIDIRECTIONAL);
921 if (buf == DMA_ERROR_CODE) { 874 if (buf == DMA_ERROR_CODE) {
922 if (netif_msg_tx_err(card)) 875 if (netif_msg_tx_err(card) && net_ratelimit())
923 pr_err("could not iommu-map packet (%p, %i). " 876 pr_err("could not iommu-map packet (%p, %i). "
924 "Dropping packet\n", skb->data, skb->len); 877 "Dropping packet\n", skb->data, skb->len);
925 return -ENOMEM; 878 return -ENOMEM;
@@ -930,10 +883,6 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
930 descr->skb = skb; 883 descr->skb = skb;
931 descr->data_status = 0; 884 descr->data_status = 0;
932 885
933 /* make sure the above values are in memory before we change the
934 * status */
935 wmb();
936
937 spider_net_set_txdescr_cmdstat(descr,skb); 886 spider_net_set_txdescr_cmdstat(descr,skb);
938 887
939 return 0; 888 return 0;
@@ -975,17 +924,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
975 struct spider_net_descr *descr; 924 struct spider_net_descr *descr;
976 int result; 925 int result;
977 926
978 descr = spider_net_get_next_tx_descr(card); 927 spider_net_release_tx_chain(card, 0);
979 928
980 if (!descr) { 929 descr = spider_net_get_next_tx_descr(card);
981 netif_stop_queue(netdev);
982 930
983 descr = spider_net_get_next_tx_descr(card); 931 if (!descr)
984 if (!descr) 932 goto error;
985 goto error;
986 else
987 netif_start_queue(netdev);
988 }
989 933
990 result = spider_net_prepare_tx_descr(card, descr, skb); 934 result = spider_net_prepare_tx_descr(card, descr, skb);
991 if (result) 935 if (result)
@@ -993,19 +937,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
993 937
994 card->tx_chain.head = card->tx_chain.head->next; 938 card->tx_chain.head = card->tx_chain.head->next;
995 939
996 /* make sure the status from spider_net_prepare_tx_descr is in
997 * memory before we check out the previous descriptor */
998 wmb();
999
1000 if (spider_net_get_descr_status(descr->prev) != 940 if (spider_net_get_descr_status(descr->prev) !=
1001 SPIDER_NET_DESCR_CARDOWNED) 941 SPIDER_NET_DESCR_CARDOWNED) {
1002 spider_net_kick_tx_dma(card, descr); 942 /* make sure the current descriptor is in memory. Then
943 * kicking it on again makes sense, if the previous is not
944 * card-owned anymore. Check the previous descriptor twice
945 * to omit an mb() in heavy traffic cases */
946 mb();
947 if (spider_net_get_descr_status(descr->prev) !=
948 SPIDER_NET_DESCR_CARDOWNED)
949 spider_net_kick_tx_dma(card, descr);
950 }
951
952 mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
1003 953
1004 return NETDEV_TX_OK; 954 return NETDEV_TX_OK;
1005 955
1006error: 956error:
1007 card->netdev_stats.tx_dropped++; 957 card->netdev_stats.tx_dropped++;
1008 return NETDEV_TX_LOCKED; 958 return NETDEV_TX_BUSY;
1009} 959}
1010 960
1011/** 961/**
@@ -1030,6 +980,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1030 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on 980 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
1031 * @descr: descriptor to process 981 * @descr: descriptor to process
1032 * @card: card structure 982 * @card: card structure
983 * @napi: whether caller is in NAPI context
1033 * 984 *
1034 * returns 1 on success, 0 if no packet was passed to the stack 985 * returns 1 on success, 0 if no packet was passed to the stack
1035 * 986 *
@@ -1038,7 +989,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1038 */ 989 */
1039static int 990static int
1040spider_net_pass_skb_up(struct spider_net_descr *descr, 991spider_net_pass_skb_up(struct spider_net_descr *descr,
1041 struct spider_net_card *card) 992 struct spider_net_card *card, int napi)
1042{ 993{
1043 struct sk_buff *skb; 994 struct sk_buff *skb;
1044 struct net_device *netdev; 995 struct net_device *netdev;
@@ -1049,22 +1000,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1049 1000
1050 netdev = card->netdev; 1001 netdev = card->netdev;
1051 1002
1052 /* check for errors in the data_error flag */ 1003 /* unmap descriptor */
1053 if ((data_error & SPIDER_NET_DATA_ERROR_MASK) && 1004 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
1054 netif_msg_rx_err(card))
1055 pr_err("error in received descriptor found, "
1056 "data_status=x%08x, data_error=x%08x\n",
1057 data_status, data_error);
1058
1059 /* prepare skb, unmap descriptor */
1060 skb = descr->skb;
1061 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
1062 PCI_DMA_BIDIRECTIONAL); 1005 PCI_DMA_BIDIRECTIONAL);
1063 1006
1064 /* the cases we'll throw away the packet immediately */ 1007 /* the cases we'll throw away the packet immediately */
1065 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) 1008 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1009 if (netif_msg_rx_err(card))
1010 pr_err("error in received descriptor found, "
1011 "data_status=x%08x, data_error=x%08x\n",
1012 data_status, data_error);
1066 return 0; 1013 return 0;
1014 }
1067 1015
1016 skb = descr->skb;
1068 skb->dev = netdev; 1017 skb->dev = netdev;
1069 skb_put(skb, descr->valid_size); 1018 skb_put(skb, descr->valid_size);
1070 1019
@@ -1076,14 +1025,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1076 1025
1077 /* checksum offload */ 1026 /* checksum offload */
1078 if (card->options.rx_csum) { 1027 if (card->options.rx_csum) {
1079 if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) && 1028 if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
1080 (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) ) 1029 SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
1030 !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1081 skb->ip_summed = CHECKSUM_UNNECESSARY; 1031 skb->ip_summed = CHECKSUM_UNNECESSARY;
1082 else 1032 else
1083 skb->ip_summed = CHECKSUM_NONE; 1033 skb->ip_summed = CHECKSUM_NONE;
1084 } else { 1034 } else
1085 skb->ip_summed = CHECKSUM_NONE; 1035 skb->ip_summed = CHECKSUM_NONE;
1086 }
1087 1036
1088 if (data_status & SPIDER_NET_VLAN_PACKET) { 1037 if (data_status & SPIDER_NET_VLAN_PACKET) {
1089 /* further enhancements: HW-accel VLAN 1038 /* further enhancements: HW-accel VLAN
@@ -1092,7 +1041,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1092 } 1041 }
1093 1042
1094 /* pass skb up to stack */ 1043 /* pass skb up to stack */
1095 netif_receive_skb(skb); 1044 if (napi)
1045 netif_receive_skb(skb);
1046 else
1047 netif_rx_ni(skb);
1096 1048
1097 /* update netdevice statistics */ 1049 /* update netdevice statistics */
1098 card->netdev_stats.rx_packets++; 1050 card->netdev_stats.rx_packets++;
@@ -1102,16 +1054,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1102} 1054}
1103 1055
1104/** 1056/**
1105 * spider_net_decode_descr - processes an rx descriptor 1057 * spider_net_decode_one_descr - processes an rx descriptor
1106 * @card: card structure 1058 * @card: card structure
1059 * @napi: whether caller is in NAPI context
1107 * 1060 *
1108 * returns 1 if a packet has been sent to the stack, otherwise 0 1061 * returns 1 if a packet has been sent to the stack, otherwise 0
1109 * 1062 *
1110 * processes an rx descriptor by iommu-unmapping the data buffer and passing 1063 * processes an rx descriptor by iommu-unmapping the data buffer and passing
1111 * the packet up to the stack 1064 * the packet up to the stack. This function is called in softirq
1065 * context, e.g. either bottom half from interrupt or NAPI polling context
1112 */ 1066 */
1113static int 1067static int
1114spider_net_decode_one_descr(struct spider_net_card *card) 1068spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1115{ 1069{
1116 enum spider_net_descr_status status; 1070 enum spider_net_descr_status status;
1117 struct spider_net_descr *descr; 1071 struct spider_net_descr *descr;
@@ -1125,17 +1079,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1125 1079
1126 if (status == SPIDER_NET_DESCR_CARDOWNED) { 1080 if (status == SPIDER_NET_DESCR_CARDOWNED) {
1127 /* nothing in the descriptor yet */ 1081 /* nothing in the descriptor yet */
1128 return 0; 1082 result=0;
1083 goto out;
1129 } 1084 }
1130 1085
1131 if (status == SPIDER_NET_DESCR_NOT_IN_USE) { 1086 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1132 /* not initialized yet, I bet chain->tail == chain->head 1087 /* not initialized yet, the ring must be empty */
1133 * and the ring is empty */
1134 spider_net_refill_rx_chain(card); 1088 spider_net_refill_rx_chain(card);
1135 return 0; 1089 spider_net_enable_rxdmac(card);
1090 result=0;
1091 goto out;
1136 } 1092 }
1137 1093
1138 /* descriptor definitively used -- move on head */ 1094 /* descriptor definitively used -- move on tail */
1139 chain->tail = descr->next; 1095 chain->tail = descr->next;
1140 1096
1141 result = 0; 1097 result = 0;
@@ -1146,6 +1102,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1146 pr_err("%s: dropping RX descriptor with state %d\n", 1102 pr_err("%s: dropping RX descriptor with state %d\n",
1147 card->netdev->name, status); 1103 card->netdev->name, status);
1148 card->netdev_stats.rx_dropped++; 1104 card->netdev_stats.rx_dropped++;
1105 pci_unmap_single(card->pdev, descr->buf_addr,
1106 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
1107 dev_kfree_skb_irq(descr->skb);
1149 goto refill; 1108 goto refill;
1150 } 1109 }
1151 1110
@@ -1158,12 +1117,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1158 } 1117 }
1159 1118
1160 /* ok, we've got a packet in descr */ 1119 /* ok, we've got a packet in descr */
1161 result = spider_net_pass_skb_up(descr, card); 1120 result = spider_net_pass_skb_up(descr, card, napi);
1162refill: 1121refill:
1163 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); 1122 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
1164 /* change the descriptor state: */ 1123 /* change the descriptor state: */
1165 spider_net_refill_rx_chain(card); 1124 if (!napi)
1166 1125 spider_net_refill_rx_chain(card);
1126out:
1167 return result; 1127 return result;
1168} 1128}
1169 1129
@@ -1189,7 +1149,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1189 packets_to_do = min(*budget, netdev->quota); 1149 packets_to_do = min(*budget, netdev->quota);
1190 1150
1191 while (packets_to_do) { 1151 while (packets_to_do) {
1192 if (spider_net_decode_one_descr(card)) { 1152 if (spider_net_decode_one_descr(card, 1)) {
1193 packets_done++; 1153 packets_done++;
1194 packets_to_do--; 1154 packets_to_do--;
1195 } else { 1155 } else {
@@ -1201,6 +1161,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
1201 1161
1202 netdev->quota -= packets_done; 1162 netdev->quota -= packets_done;
1203 *budget -= packets_done; 1163 *budget -= packets_done;
1164 spider_net_refill_rx_chain(card);
1204 1165
1205 /* if all packets are in the stack, enable interrupts and return 0 */ 1166 /* if all packets are in the stack, enable interrupts and return 0 */
1206 /* if not, return 1 */ 1167 /* if not, return 1 */
@@ -1345,6 +1306,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
1345} 1306}
1346 1307
1347/** 1308/**
1309 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
1310 * @card: card structure
1311 *
1312 * spider_net_handle_rxram_full empties the RX ring so that spider can put
1313 * more packets in it and empty its RX RAM. This is called in bottom half
1314 * context
1315 */
1316static void
1317spider_net_handle_rxram_full(struct spider_net_card *card)
1318{
1319 while (spider_net_decode_one_descr(card, 0))
1320 ;
1321 spider_net_enable_rxchtails(card);
1322 spider_net_enable_rxdmac(card);
1323 netif_rx_schedule(card->netdev);
1324}
1325
1326/**
1348 * spider_net_handle_error_irq - handles errors raised by an interrupt 1327 * spider_net_handle_error_irq - handles errors raised by an interrupt
1349 * @card: card structure 1328 * @card: card structure
1350 * @status_reg: interrupt status register 0 (GHIINT0STS) 1329 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1452,17 +1431,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1452 switch (i) 1431 switch (i)
1453 { 1432 {
1454 case SPIDER_NET_GTMFLLINT: 1433 case SPIDER_NET_GTMFLLINT:
1455 if (netif_msg_intr(card)) 1434 if (netif_msg_intr(card) && net_ratelimit())
1456 pr_err("Spider TX RAM full\n"); 1435 pr_err("Spider TX RAM full\n");
1457 show_error = 0; 1436 show_error = 0;
1458 break; 1437 break;
1438 case SPIDER_NET_GRFDFLLINT: /* fallthrough */
1439 case SPIDER_NET_GRFCFLLINT: /* fallthrough */
1440 case SPIDER_NET_GRFBFLLINT: /* fallthrough */
1441 case SPIDER_NET_GRFAFLLINT: /* fallthrough */
1459 case SPIDER_NET_GRMFLLINT: 1442 case SPIDER_NET_GRMFLLINT:
1460 if (netif_msg_intr(card)) 1443 if (netif_msg_intr(card) && net_ratelimit())
1461 pr_err("Spider RX RAM full, incoming packets " 1444 pr_err("Spider RX RAM full, incoming packets "
1462 "might be discarded !\n"); 1445 "might be discarded!\n");
1463 netif_rx_schedule(card->netdev); 1446 spider_net_rx_irq_off(card);
1464 spider_net_enable_rxchtails(card); 1447 tasklet_schedule(&card->rxram_full_tl);
1465 spider_net_enable_rxdmac(card); 1448 show_error = 0;
1466 break; 1449 break;
1467 1450
1468 /* case SPIDER_NET_GTMSHTINT: problem, print a message */ 1451 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
@@ -1470,10 +1453,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1470 /* allrighty. tx from previous descr ok */ 1453 /* allrighty. tx from previous descr ok */
1471 show_error = 0; 1454 show_error = 0;
1472 break; 1455 break;
1473 /* case SPIDER_NET_GRFDFLLINT: print a message down there */
1474 /* case SPIDER_NET_GRFCFLLINT: print a message down there */
1475 /* case SPIDER_NET_GRFBFLLINT: print a message down there */
1476 /* case SPIDER_NET_GRFAFLLINT: print a message down there */
1477 1456
1478 /* chain end */ 1457 /* chain end */
1479 case SPIDER_NET_GDDDCEINT: /* fallthrough */ 1458 case SPIDER_NET_GDDDCEINT: /* fallthrough */
@@ -1485,6 +1464,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1485 "restarting DMAC %c.\n", 1464 "restarting DMAC %c.\n",
1486 'D'+i-SPIDER_NET_GDDDCEINT); 1465 'D'+i-SPIDER_NET_GDDDCEINT);
1487 spider_net_refill_rx_chain(card); 1466 spider_net_refill_rx_chain(card);
1467 spider_net_enable_rxdmac(card);
1488 show_error = 0; 1468 show_error = 0;
1489 break; 1469 break;
1490 1470
@@ -1495,6 +1475,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1495 case SPIDER_NET_GDAINVDINT: 1475 case SPIDER_NET_GDAINVDINT:
1496 /* could happen when rx chain is full */ 1476 /* could happen when rx chain is full */
1497 spider_net_refill_rx_chain(card); 1477 spider_net_refill_rx_chain(card);
1478 spider_net_enable_rxdmac(card);
1498 show_error = 0; 1479 show_error = 0;
1499 break; 1480 break;
1500 1481
@@ -1583,17 +1564,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
1583 if (!status_reg) 1564 if (!status_reg)
1584 return IRQ_NONE; 1565 return IRQ_NONE;
1585 1566
1586 if (status_reg & SPIDER_NET_TXINT)
1587 spider_net_release_tx_chain(card, 0);
1588
1589 if (status_reg & SPIDER_NET_RXINT ) { 1567 if (status_reg & SPIDER_NET_RXINT ) {
1590 spider_net_rx_irq_off(card); 1568 spider_net_rx_irq_off(card);
1591 netif_rx_schedule(netdev); 1569 netif_rx_schedule(netdev);
1592 } 1570 }
1593 1571
1594 /* we do this after rx and tx processing, as we want the tx chain 1572 if (status_reg & SPIDER_NET_ERRINT )
1595 * processed to see, whether we should restart tx dma processing */ 1573 spider_net_handle_error_irq(card, status_reg);
1596 spider_net_handle_error_irq(card, status_reg);
1597 1574
1598 /* clear interrupt sources */ 1575 /* clear interrupt sources */
1599 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); 1576 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1834,26 +1811,27 @@ spider_net_setup_phy(struct spider_net_card *card)
1834/** 1811/**
1835 * spider_net_download_firmware - loads firmware into the adapter 1812 * spider_net_download_firmware - loads firmware into the adapter
1836 * @card: card structure 1813 * @card: card structure
1837 * @firmware: firmware pointer 1814 * @firmware_ptr: pointer to firmware data
1838 * 1815 *
1839 * spider_net_download_firmware loads the firmware opened by 1816 * spider_net_download_firmware loads the firmware data into the
1840 * spider_net_init_firmware into the adapter. 1817 * adapter. It assumes the length etc. to be allright.
1841 */ 1818 */
1842static int 1819static int
1843spider_net_download_firmware(struct spider_net_card *card, 1820spider_net_download_firmware(struct spider_net_card *card,
1844 const struct firmware *firmware) 1821 u8 *firmware_ptr)
1845{ 1822{
1846 int sequencer, i; 1823 int sequencer, i;
1847 u32 *fw_ptr = (u32 *)firmware->data; 1824 u32 *fw_ptr = (u32 *)firmware_ptr;
1848 1825
1849 /* stop sequencers */ 1826 /* stop sequencers */
1850 spider_net_write_reg(card, SPIDER_NET_GSINIT, 1827 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1851 SPIDER_NET_STOP_SEQ_VALUE); 1828 SPIDER_NET_STOP_SEQ_VALUE);
1852 1829
1853 for (sequencer = 0; sequencer < 6; sequencer++) { 1830 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1831 sequencer++) {
1854 spider_net_write_reg(card, 1832 spider_net_write_reg(card,
1855 SPIDER_NET_GSnPRGADR + sequencer * 8, 0); 1833 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1856 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { 1834 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1857 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1835 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1858 sequencer * 8, *fw_ptr); 1836 sequencer * 8, *fw_ptr);
1859 fw_ptr++; 1837 fw_ptr++;
@@ -1898,41 +1876,53 @@ spider_net_download_firmware(struct spider_net_card *card,
1898static int 1876static int
1899spider_net_init_firmware(struct spider_net_card *card) 1877spider_net_init_firmware(struct spider_net_card *card)
1900{ 1878{
1901 struct firmware *firmware; 1879 struct firmware *firmware = NULL;
1902 struct device_node *dn; 1880 struct device_node *dn;
1903 u8 *fw_prop; 1881 u8 *fw_prop = NULL;
1904 int err = -EIO; 1882 int err = -ENOENT;
1883 int fw_size;
1905 1884
1906 if (request_firmware((const struct firmware **)&firmware, 1885 if (request_firmware((const struct firmware **)&firmware,
1907 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) { 1886 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1908 if (netif_msg_probe(card)) 1887 if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
1909 pr_err("Couldn't read in sequencer data file %s.\n", 1888 netif_msg_probe(card) ) {
1910 SPIDER_NET_FIRMWARE_NAME); 1889 pr_err("Incorrect size of spidernet firmware in " \
1911 1890 "filesystem. Looking in host firmware...\n");
1912 dn = pci_device_to_OF_node(card->pdev); 1891 goto try_host_fw;
1913 if (!dn) 1892 }
1914 goto out; 1893 err = spider_net_download_firmware(card, firmware->data);
1915 1894
1916 fw_prop = (u8 *)get_property(dn, "firmware", NULL); 1895 release_firmware(firmware);
1917 if (!fw_prop) 1896 if (err)
1918 goto out; 1897 goto try_host_fw;
1919 1898
1920 memcpy(firmware->data, fw_prop, 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)); 1899 goto done;
1921 firmware->size = 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32);
1922 } 1900 }
1923 1901
1924 if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) { 1902try_host_fw:
1925 if (netif_msg_probe(card)) 1903 dn = pci_device_to_OF_node(card->pdev);
1926 pr_err("Invalid size of sequencer data file %s.\n", 1904 if (!dn)
1927 SPIDER_NET_FIRMWARE_NAME); 1905 goto out_err;
1928 goto out; 1906
1907 fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
1908 if (!fw_prop)
1909 goto out_err;
1910
1911 if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
1912 netif_msg_probe(card) ) {
1913 pr_err("Incorrect size of spidernet firmware in " \
1914 "host firmware\n");
1915 goto done;
1929 } 1916 }
1930 1917
1931 if (!spider_net_download_firmware(card, firmware)) 1918 err = spider_net_download_firmware(card, fw_prop);
1932 err = 0;
1933out:
1934 release_firmware(firmware);
1935 1919
1920done:
1921 return err;
1922out_err:
1923 if (netif_msg_probe(card))
1924 pr_err("Couldn't find spidernet firmware in filesystem " \
1925 "or host firmware\n");
1936 return err; 1926 return err;
1937} 1927}
1938 1928
@@ -1952,10 +1942,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1952 SPIDER_NET_CKRCTRL_RUN_VALUE); 1942 SPIDER_NET_CKRCTRL_RUN_VALUE);
1953 1943
1954 /* empty sequencer data */ 1944 /* empty sequencer data */
1955 for (sequencer = 0; sequencer < 6; sequencer++) { 1945 for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1946 sequencer++) {
1956 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1947 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1957 sequencer * 8, 0x0); 1948 sequencer * 8, 0x0);
1958 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { 1949 for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1959 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + 1950 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1960 sequencer * 8, 0x0); 1951 sequencer * 8, 0x0);
1961 } 1952 }
@@ -2079,7 +2070,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
2079 SET_NETDEV_DEV(netdev, &card->pdev->dev); 2070 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2080 2071
2081 pci_set_drvdata(card->pdev, netdev); 2072 pci_set_drvdata(card->pdev, netdev);
2082 spin_lock_init(&card->intmask_lock); 2073
2074 atomic_set(&card->tx_chain_release,0);
2075 card->rxram_full_tl.data = (unsigned long) card;
2076 card->rxram_full_tl.func =
2077 (void (*)(unsigned long)) spider_net_handle_rxram_full;
2078 init_timer(&card->tx_timer);
2079 card->tx_timer.function =
2080 (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
2081 card->tx_timer.data = (unsigned long) card;
2083 netdev->irq = card->pdev->irq; 2082 netdev->irq = card->pdev->irq;
2084 2083
2085 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2084 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;