path: root/drivers/net/spider_net.c
Diffstat (limited to 'drivers/net/spider_net.c')
-rw-r--r--  drivers/net/spider_net.c | 512
1 file changed, 265 insertions(+), 247 deletions(-)
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 0d765f1733b5..1f5975a61e1f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -22,7 +22,6 @@
  */
 
 #include <linux/config.h>
-
 #include <linux/compiler.h>
 #include <linux/crc32.h>
 #include <linux/delay.h>
@@ -30,6 +29,7 @@
 #include <linux/ethtool.h>
 #include <linux/firmware.h>
 #include <linux/if_vlan.h>
+#include <linux/in.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/ip.h>
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/tcp.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <asm/bitops.h>
@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
 	writel(value, card->regs + reg);
 }
 
-/**
- * spider_net_write_reg_sync - writes to an SMMIO register of a card
- * @card: device structure
- * @reg: register to write to
- * @value: value to write into the specified SMMIO register
- *
- * Unlike spider_net_write_reg, this will also make sure the
- * data arrives on the card by reading the reg again.
- */
-static void
-spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
-{
-	value = cpu_to_le32(value);
-	writel(value, card->regs + reg);
-	(void)readl(card->regs + reg);
-}
-
-/**
- * spider_net_rx_irq_off - switch off rx irq on this spider card
- * @card: device structure
- *
- * switches off rx irq by masking them out in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_off(struct spider_net_card *card)
-{
-	u32 regvalue;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->intmask_lock, flags);
-	regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-	regvalue &= ~SPIDER_NET_RXINT;
-	spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-	spin_unlock_irqrestore(&card->intmask_lock, flags);
-}
-
 /** spider_net_write_phy - write to phy register
  * @netdev: adapter to be written to
  * @mii_id: id of MII
@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
 }
 
 /**
- * spider_net_rx_irq_on - switch on rx irq on this spider card
- * @card: device structure
- *
- * switches on rx irq by enabling them in the GHIINTnMSK register
- */
-static void
-spider_net_rx_irq_on(struct spider_net_card *card)
-{
-	u32 regvalue;
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->intmask_lock, flags);
-	regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-	regvalue |= SPIDER_NET_RXINT;
-	spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-	spin_unlock_irqrestore(&card->intmask_lock, flags);
-}
-
-/**
- * spider_net_tx_irq_off - switch off tx irq on this spider card
+ * spider_net_rx_irq_off - switch off rx irq on this spider card
  * @card: device structure
  *
- * switches off tx irq by masking them out in the GHIINTnMSK register
+ * switches off rx irq by masking them out in the GHIINTnMSK register
  */
 static void
-spider_net_tx_irq_off(struct spider_net_card *card)
+spider_net_rx_irq_off(struct spider_net_card *card)
 {
 	u32 regvalue;
-	unsigned long flags;
 
-	spin_lock_irqsave(&card->intmask_lock, flags);
-	regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-	regvalue &= ~SPIDER_NET_TXINT;
-	spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-	spin_unlock_irqrestore(&card->intmask_lock, flags);
+	regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
+	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
 }
 
 /**
- * spider_net_tx_irq_on - switch on tx irq on this spider card
+ * spider_net_rx_irq_on - switch on rx irq on this spider card
  * @card: device structure
  *
- * switches on tx irq by enabling them in the GHIINTnMSK register
+ * switches on rx irq by enabling them in the GHIINTnMSK register
  */
 static void
-spider_net_tx_irq_on(struct spider_net_card *card)
+spider_net_rx_irq_on(struct spider_net_card *card)
 {
 	u32 regvalue;
-	unsigned long flags;
 
-	spin_lock_irqsave(&card->intmask_lock, flags);
-	regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
-	regvalue |= SPIDER_NET_TXINT;
-	spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
-	spin_unlock_irqrestore(&card->intmask_lock, flags);
+	regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
+	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
 }
 
 /**
@@ -326,9 +264,8 @@ static enum spider_net_descr_status
 spider_net_get_descr_status(struct spider_net_descr *descr)
 {
 	u32 cmd_status;
-	rmb();
+
 	cmd_status = descr->dmac_cmd_status;
-	rmb();
 	cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
 	/* no need to mask out any bits, as cmd_status is 32 bits wide only
 	 * (and unsigned) */
@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
 {
 	u32 cmd_status;
 	/* read the status */
-	mb();
 	cmd_status = descr->dmac_cmd_status;
 	/* clean the upper 4 bits */
 	cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
 	cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
 	/* and write it back */
 	descr->dmac_cmd_status = cmd_status;
-	wmb();
 }
 
 /**
@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
 {
 	int i;
 	struct spider_net_descr *descr;
+	dma_addr_t buf;
 
-	spin_lock_init(&card->chain_lock);
+	atomic_set(&card->rx_chain_refill,0);
 
 	descr = start_descr;
 	memset(descr, 0, sizeof(*descr) * no);
@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
 	for (i=0; i<no; i++, descr++) {
 		spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
 
-		descr->bus_addr =
-			pci_map_single(card->pdev, descr,
-				       SPIDER_NET_DESCR_SIZE,
-				       PCI_DMA_BIDIRECTIONAL);
+		buf = pci_map_single(card->pdev, descr,
+				     SPIDER_NET_DESCR_SIZE,
+				     PCI_DMA_BIDIRECTIONAL);
 
-		if (descr->bus_addr == DMA_ERROR_CODE)
+		if (buf == DMA_ERROR_CODE)
 			goto iommu_error;
 
+		descr->bus_addr = buf;
 		descr->next = descr + 1;
 		descr->prev = descr - 1;
 
@@ -439,7 +375,8 @@ iommu_error:
 	for (i=0; i < no; i++, descr++)
 		if (descr->bus_addr)
 			pci_unmap_single(card->pdev, descr->bus_addr,
-					 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
+					 SPIDER_NET_DESCR_SIZE,
+					 PCI_DMA_BIDIRECTIONAL);
 	return -ENOMEM;
 }
 
@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
 		if (descr->skb) {
 			dev_kfree_skb(descr->skb);
 			pci_unmap_single(card->pdev, descr->buf_addr,
-					 SPIDER_NET_MAX_MTU,
+					 SPIDER_NET_MAX_FRAME,
 					 PCI_DMA_BIDIRECTIONAL);
 		}
 		descr = descr->next;
@@ -480,12 +417,13 @@ static int
 spider_net_prepare_rx_descr(struct spider_net_card *card,
 			    struct spider_net_descr *descr)
 {
+	dma_addr_t buf;
 	int error = 0;
 	int offset;
 	int bufsize;
 
 	/* we need to round up the buffer size to a multiple of 128 */
-	bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) &
+	bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
 		  (~(SPIDER_NET_RXBUF_ALIGN - 1));
 
 	/* and we need to have it 128 byte aligned, therefore we allocate a
@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 	/* allocate an skb */
 	descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
 	if (!descr->skb) {
-		if (net_ratelimit())
-			if (netif_msg_rx_err(card))
-				pr_err("Not enough memory to allocate "
-					"rx buffer\n");
+		if (netif_msg_rx_err(card) && net_ratelimit())
+			pr_err("Not enough memory to allocate rx buffer\n");
 		return -ENOMEM;
 	}
 	descr->buf_size = bufsize;
@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 	if (offset)
 		skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
 	/* io-mmu-map the skb */
-	descr->buf_addr = pci_map_single(card->pdev, descr->skb->data,
-					 SPIDER_NET_MAX_MTU,
-					 PCI_DMA_BIDIRECTIONAL);
-	if (descr->buf_addr == DMA_ERROR_CODE) {
+	buf = pci_map_single(card->pdev, descr->skb->data,
+			     SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+	descr->buf_addr = buf;
+	if (buf == DMA_ERROR_CODE) {
 		dev_kfree_skb_any(descr->skb);
-		if (netif_msg_rx_err(card))
+		if (netif_msg_rx_err(card) && net_ratelimit())
 			pr_err("Could not iommu-map rx buffer\n");
 		spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
 	} else {
@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
 }
 
 /**
- * spider_net_enable_rxctails - sets RX dmac chain tail addresses
+ * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
  * @card: card structure
  *
- * spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the
+ * spider_net_enable_rxchtails sets the RX DMAC chain tail adresses in the
  * chip by writing to the appropriate register. DMA is enabled in
  * spider_net_enable_rxdmac.
  */
@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
 static void
 spider_net_enable_rxdmac(struct spider_net_card *card)
 {
+	wmb();
 	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
 			     SPIDER_NET_DMA_RX_VALUE);
 }
@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
  * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
  * @card: card structure
  *
- * refills descriptors in all chains (last used chain first): allocates skbs
- * and iommu-maps them.
+ * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
  */
 static void
 spider_net_refill_rx_chain(struct spider_net_card *card)
 {
 	struct spider_net_descr_chain *chain;
-	int count = 0;
-	unsigned long flags;
 
 	chain = &card->rx_chain;
 
-	spin_lock_irqsave(&card->chain_lock, flags);
-	while (spider_net_get_descr_status(chain->head) ==
-	       SPIDER_NET_DESCR_NOT_IN_USE) {
-		if (spider_net_prepare_rx_descr(card, chain->head))
-			break;
-		count++;
-		chain->head = chain->head->next;
-	}
-	spin_unlock_irqrestore(&card->chain_lock, flags);
+	/* one context doing the refill (and a second context seeing that
+	 * and omitting it) is ok. If called by NAPI, we'll be called again
+	 * as spider_net_decode_one_descr is called several times. If some
+	 * interrupt calls us, the NAPI is about to clean up anyway. */
+	if (atomic_inc_return(&card->rx_chain_refill) == 1)
+		while (spider_net_get_descr_status(chain->head) ==
+		       SPIDER_NET_DESCR_NOT_IN_USE) {
+			if (spider_net_prepare_rx_descr(card, chain->head))
+				break;
+			chain->head = chain->head->next;
+		}
 
-	/* could be optimized, only do that, if we know the DMA processing
-	 * has terminated */
-	if (count)
-		spider_net_enable_rxdmac(card);
+	atomic_dec(&card->rx_chain_refill);
 }
 
 /**
@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
 	/* this will allocate the rest of the rx buffers; if not, it's
 	 * business as usual later on */
 	spider_net_refill_rx_chain(card);
+	spider_net_enable_rxdmac(card);
 	return 0;
 
 error:
@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
  * @card: adapter structure
  * @brutal: if set, don't care about whether descriptor seems to be in use
  *
- * releases the tx descriptors that spider has finished with (if non-brutal)
- * or simply release tx descriptors (if brutal)
+ * returns 0 if the tx ring is empty, otherwise 1.
+ *
+ * spider_net_release_tx_chain releases the tx descriptors that spider has
+ * finished with (if non-brutal) or simply release tx descriptors (if brutal).
+ * If some other context is calling this function, we return 1 so that we're
+ * scheduled again (if we were scheduled) and will not loose initiative.
  */
-static void
+static int
 spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
 {
 	struct spider_net_descr_chain *tx_chain = &card->tx_chain;
 	enum spider_net_descr_status status;
 
-	spider_net_tx_irq_off(card);
+	if (atomic_inc_return(&card->tx_chain_release) != 1) {
+		atomic_dec(&card->tx_chain_release);
+		return 1;
+	}
 
-	/* no lock for chain needed, if this is only executed once at a time */
-again:
 	for (;;) {
 		status = spider_net_get_descr_status(tx_chain->tail);
 		switch (status) {
 		case SPIDER_NET_DESCR_CARDOWNED:
-			if (!brutal) goto out;
+			if (!brutal)
+				goto out;
 			/* fallthrough, if we release the descriptors
 			 * brutally (then we don't care about
 			 * SPIDER_NET_DESCR_CARDOWNED) */
@@ -693,25 +633,30 @@ again:
 		tx_chain->tail = tx_chain->tail->next;
 	}
 out:
+	atomic_dec(&card->tx_chain_release);
+
 	netif_wake_queue(card->netdev);
 
-	if (!brutal) {
-		/* switch on tx irqs (while we are still in the interrupt
-		 * handler, so we don't get an interrupt), check again
-		 * for done descriptors. This results in fewer interrupts */
-		spider_net_tx_irq_on(card);
-		status = spider_net_get_descr_status(tx_chain->tail);
-		switch (status) {
-			case SPIDER_NET_DESCR_RESPONSE_ERROR:
-			case SPIDER_NET_DESCR_PROTECTION_ERROR:
-			case SPIDER_NET_DESCR_FORCE_END:
-			case SPIDER_NET_DESCR_COMPLETE:
-				goto again;
-			default:
-				break;
-		}
-	}
+	if (status == SPIDER_NET_DESCR_CARDOWNED)
+		return 1;
+	return 0;
+}
 
+/**
+ * spider_net_cleanup_tx_ring - cleans up the TX ring
+ * @card: card structure
+ *
+ * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
+ * interrupts to cleanup our TX ring) and returns sent packets to the stack
+ * by freeing them
+ */
+static void
+spider_net_cleanup_tx_ring(struct spider_net_card *card)
+{
+	if ( (spider_net_release_tx_chain(card, 0)) &&
+	     (card->netdev->flags & IFF_UP) ) {
+		mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
+	}
 }
 
 /**
@@ -726,16 +671,22 @@ out:
 static u8
 spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
 {
-	/* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
-	 * ff:ff:ff:ff:ff:ff must result in 0xfd */
 	u32 crc;
 	u8 hash;
+	char addr_for_crc[ETH_ALEN] = { 0, };
+	int i, bit;
 
-	crc = crc32_be(~0, addr, netdev->addr_len);
+	for (i = 0; i < ETH_ALEN * 8; i++) {
+		bit = (addr[i / 8] >> (i % 8)) & 1;
+		addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
+	}
+
+	crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
 
 	hash = (crc >> 27);
 	hash <<= 3;
 	hash |= crc & 7;
+	hash &= 0xff;
 
 	return hash;
 }
@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
 {
 	struct spider_net_card *card = netdev_priv(netdev);
 
+	tasklet_kill(&card->rxram_full_tl);
 	netif_poll_disable(netdev);
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
+	del_timer_sync(&card->tx_timer);
 
 	/* disable/mask all interrupts */
 	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
  * @skb: packet to consider
  *
  * fills out the command and status field of the descriptor structure,
- * depending on hardware checksum settings. This function assumes a wmb()
- * has executed before.
+ * depending on hardware checksum settings.
  */
 static void
 spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
 			       struct sk_buff *skb)
 {
+	/* make sure the other fields in the descriptor are written */
+	wmb();
+
 	if (skb->ip_summed != CHECKSUM_HW) {
 		descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
 		return;
@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
 	/* is packet ip?
 	 * if yes: tcp? udp? */
 	if (skb->protocol == htons(ETH_P_IP)) {
-		if (skb->nh.iph->protocol == IPPROTO_TCP) {
+		if (skb->nh.iph->protocol == IPPROTO_TCP)
 			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
-		} else if (skb->nh.iph->protocol == IPPROTO_UDP) {
+		else if (skb->nh.iph->protocol == IPPROTO_UDP)
 			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
-		} else { /* the stack should checksum non-tcp and non-udp
+		else /* the stack should checksum non-tcp and non-udp
 			    packets on his own: NETIF_F_IP_CSUM */
 			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
-		}
 	}
 }
 
@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
 			    struct spider_net_descr *descr,
 			    struct sk_buff *skb)
 {
-	descr->buf_addr = pci_map_single(card->pdev, skb->data,
-					 skb->len, PCI_DMA_BIDIRECTIONAL);
-	if (descr->buf_addr == DMA_ERROR_CODE) {
-		if (netif_msg_tx_err(card))
+	dma_addr_t buf;
+
+	buf = pci_map_single(card->pdev, skb->data,
+			     skb->len, PCI_DMA_BIDIRECTIONAL);
+	if (buf == DMA_ERROR_CODE) {
+		if (netif_msg_tx_err(card) && net_ratelimit())
 			pr_err("could not iommu-map packet (%p, %i). "
 				  "Dropping packet\n", skb->data, skb->len);
 		return -ENOMEM;
 	}
 
+	descr->buf_addr = buf;
 	descr->buf_size = skb->len;
 	descr->skb = skb;
 	descr->data_status = 0;
 
-	/* make sure the above values are in memory before we change the
-	 * status */
-	wmb();
-
 	spider_net_set_txdescr_cmdstat(descr,skb);
 
 	return 0;
@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct spider_net_descr *descr;
 	int result;
 
-	descr = spider_net_get_next_tx_descr(card);
+	spider_net_release_tx_chain(card, 0);
 
-	if (!descr) {
-		netif_stop_queue(netdev);
+	descr = spider_net_get_next_tx_descr(card);
 
-		descr = spider_net_get_next_tx_descr(card);
-		if (!descr)
-			goto error;
-		else
-			netif_start_queue(netdev);
-	}
+	if (!descr)
+		goto error;
 
 	result = spider_net_prepare_tx_descr(card, descr, skb);
 	if (result)
@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	card->tx_chain.head = card->tx_chain.head->next;
 
-	/* make sure the status from spider_net_prepare_tx_descr is in
-	 * memory before we check out the previous descriptor */
-	wmb();
-
 	if (spider_net_get_descr_status(descr->prev) !=
-	    SPIDER_NET_DESCR_CARDOWNED)
-		spider_net_kick_tx_dma(card, descr);
+	    SPIDER_NET_DESCR_CARDOWNED) {
+		/* make sure the current descriptor is in memory. Then
+		 * kicking it on again makes sense, if the previous is not
+		 * card-owned anymore. Check the previous descriptor twice
+		 * to omit an mb() in heavy traffic cases */
+		mb();
+		if (spider_net_get_descr_status(descr->prev) !=
+		    SPIDER_NET_DESCR_CARDOWNED)
+			spider_net_kick_tx_dma(card, descr);
+	}
+
+	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
 
 	return NETDEV_TX_OK;
 
 error:
 	card->netdev_stats.tx_dropped++;
-	return NETDEV_TX_LOCKED;
+	return NETDEV_TX_BUSY;
 }
 
 /**
@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
 * @descr: descriptor to process
 * @card: card structure
+ * @napi: whether caller is in NAPI context
 *
 * returns 1 on success, 0 if no packet was passed to the stack
 *
@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 */
 static int
 spider_net_pass_skb_up(struct spider_net_descr *descr,
-		       struct spider_net_card *card)
+		       struct spider_net_card *card, int napi)
 {
 	struct sk_buff *skb;
 	struct net_device *netdev;
@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 
 	netdev = card->netdev;
 
-	/* check for errors in the data_error flag */
-	if ((data_error & SPIDER_NET_DATA_ERROR_MASK) &&
-	    netif_msg_rx_err(card))
-		pr_err("error in received descriptor found, "
-		       "data_status=x%08x, data_error=x%08x\n",
-		       data_status, data_error);
-
-	/* prepare skb, unmap descriptor */
-	skb = descr->skb;
-	pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
+	/* unmap descriptor */
+	pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
 			 PCI_DMA_BIDIRECTIONAL);
 
 	/* the cases we'll throw away the packet immediately */
-	if (data_error & SPIDER_NET_DESTROY_RX_FLAGS)
+	if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
+		if (netif_msg_rx_err(card))
+			pr_err("error in received descriptor found, "
+			       "data_status=x%08x, data_error=x%08x\n",
+			       data_status, data_error);
 		return 0;
+	}
 
+	skb = descr->skb;
 	skb->dev = netdev;
 	skb_put(skb, descr->valid_size);
 
@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 
 	/* checksum offload */
 	if (card->options.rx_csum) {
-		if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
-		     (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) )
+		if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
+		       SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
+		     !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
 			skb->ip_summed = CHECKSUM_NONE;
-	} else {
+	} else
 		skb->ip_summed = CHECKSUM_NONE;
-	}
 
 	if (data_status & SPIDER_NET_VLAN_PACKET) {
 		/* further enhancements: HW-accel VLAN
@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 	}
 
 	/* pass skb up to stack */
-	netif_receive_skb(skb);
+	if (napi)
+		netif_receive_skb(skb);
+	else
+		netif_rx_ni(skb);
 
 	/* update netdevice statistics */
 	card->netdev_stats.rx_packets++;
@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
 }
 
 /**
- * spider_net_decode_descr - processes an rx descriptor
+ * spider_net_decode_one_descr - processes an rx descriptor
 * @card: card structure
+ * @napi: whether caller is in NAPI context
 *
 * returns 1 if a packet has been sent to the stack, otherwise 0
 *
 * processes an rx descriptor by iommu-unmapping the data buffer and passing
- * the packet up to the stack
+ * the packet up to the stack. This function is called in softirq
+ * context, e.g. either bottom half from interrupt or NAPI polling context
 */
 static int
-spider_net_decode_one_descr(struct spider_net_card *card)
+spider_net_decode_one_descr(struct spider_net_card *card, int napi)
 {
 	enum spider_net_descr_status status;
 	struct spider_net_descr *descr;
@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
 
 	if (status == SPIDER_NET_DESCR_CARDOWNED) {
 		/* nothing in the descriptor yet */
-		return 0;
+		result=0;
+		goto out;
 	}
 
 	if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
-		/* not initialized yet, I bet chain->tail == chain->head
-		 * and the ring is empty */
+		/* not initialized yet, the ring must be empty */
 		spider_net_refill_rx_chain(card);
-		return 0;
+		spider_net_enable_rxdmac(card);
+		result=0;
+		goto out;
 	}
 
-	/* descriptor definitively used -- move on head */
+	/* descriptor definitively used -- move on tail */
 	chain->tail = descr->next;
 
 	result = 0;
@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
 		pr_err("%s: dropping RX descriptor with state %d\n",
 		       card->netdev->name, status);
 		card->netdev_stats.rx_dropped++;
+		pci_unmap_single(card->pdev, descr->buf_addr,
+				 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
+		dev_kfree_skb_irq(descr->skb);
 		goto refill;
 	}
 
@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
 	}
 
 	/* ok, we've got a packet in descr */
-	result = spider_net_pass_skb_up(descr, card);
+	result = spider_net_pass_skb_up(descr, card, napi);
 refill:
 	spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
 	/* change the descriptor state: */
-	spider_net_refill_rx_chain(card);
-
+	if (!napi)
+		spider_net_refill_rx_chain(card);
+out:
 	return result;
 }
 
@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
 	packets_to_do = min(*budget, netdev->quota);
 
 	while (packets_to_do) {
-		if (spider_net_decode_one_descr(card)) {
+		if (spider_net_decode_one_descr(card, 1)) {
 			packets_done++;
 			packets_to_do--;
 		} else {
@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
 
 	netdev->quota -= packets_done;
 	*budget -= packets_done;
+	spider_net_refill_rx_chain(card);
 
 	/* if all packets are in the stack, enable interrupts and return 0 */
 	/* if not, return 1 */
@@ -1342,6 +1307,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
 }
 
 /**
+ * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
+ * @card: card structure
+ *
+ * spider_net_handle_rxram_full empties the RX ring so that spider can put
+ * more packets in it and empty its RX RAM. This is called in bottom half
+ * context
+ */
+static void
+spider_net_handle_rxram_full(struct spider_net_card *card)
+{
+	while (spider_net_decode_one_descr(card, 0))
+		;
+	spider_net_enable_rxchtails(card);
+	spider_net_enable_rxdmac(card);
+	netif_rx_schedule(card->netdev);
+}
+
+/**
 * spider_net_handle_error_irq - handles errors raised by an interrupt
 * @card: card structure
 * @status_reg: interrupt status register 0 (GHIINT0STS)
@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 	switch (i)
 	{
 	case SPIDER_NET_GTMFLLINT:
-		if (netif_msg_intr(card))
+		if (netif_msg_intr(card) && net_ratelimit())
 			pr_err("Spider TX RAM full\n");
 		show_error = 0;
 		break;
+	case SPIDER_NET_GRFDFLLINT: /* fallthrough */
+	case SPIDER_NET_GRFCFLLINT: /* fallthrough */
+	case SPIDER_NET_GRFBFLLINT: /* fallthrough */
+	case SPIDER_NET_GRFAFLLINT: /* fallthrough */
 	case SPIDER_NET_GRMFLLINT:
-		if (netif_msg_intr(card))
+		if (netif_msg_intr(card) && net_ratelimit())
 			pr_err("Spider RX RAM full, incoming packets "
-			       "might be discarded !\n");
-		netif_rx_schedule(card->netdev);
-		spider_net_enable_rxchtails(card);
-		spider_net_enable_rxdmac(card);
+			       "might be discarded!\n");
+		spider_net_rx_irq_off(card);
+		tasklet_schedule(&card->rxram_full_tl);
+		show_error = 0;
 		break;
 
 	/* case SPIDER_NET_GTMSHTINT: problem, print a message */
@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 		/* allrighty. tx from previous descr ok */
 		show_error = 0;
 		break;
-	/* case SPIDER_NET_GRFDFLLINT: print a message down there */
-	/* case SPIDER_NET_GRFCFLLINT: print a message down there */
-	/* case SPIDER_NET_GRFBFLLINT: print a message down there */
-	/* case SPIDER_NET_GRFAFLLINT: print a message down there */
 
 	/* chain end */
 	case SPIDER_NET_GDDDCEINT: /* fallthrough */
@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 			       "restarting DMAC %c.\n",
 			       'D'+i-SPIDER_NET_GDDDCEINT);
 		spider_net_refill_rx_chain(card);
+		spider_net_enable_rxdmac(card);
 		show_error = 0;
 		break;
 
@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 	case SPIDER_NET_GDAINVDINT:
 		/* could happen when rx chain is full */
 		spider_net_refill_rx_chain(card);
+		spider_net_enable_rxdmac(card);
 		show_error = 0;
 		break;
 
@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
 	if (!status_reg)
 		return IRQ_NONE;
 
-	if (status_reg & SPIDER_NET_TXINT)
-		spider_net_release_tx_chain(card, 0);
-
 	if (status_reg & SPIDER_NET_RXINT ) {
 		spider_net_rx_irq_off(card);
 		netif_rx_schedule(netdev);
 	}
 
-	/* we do this after rx and tx processing, as we want the tx chain
-	 * processed to see, whether we should restart tx dma processing */
-	spider_net_handle_error_irq(card, status_reg);
+	if (status_reg & SPIDER_NET_ERRINT )
+		spider_net_handle_error_irq(card, status_reg);
 
 	/* clear interrupt sources */
 	spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
 /**
 * spider_net_download_firmware - loads firmware into the adapter
 * @card: card structure
- * @firmware: firmware pointer
+ * @firmware_ptr: pointer to firmware data
 *
- * spider_net_download_firmware loads the firmware opened by
- * spider_net_init_firmware into the adapter.
+ * spider_net_download_firmware loads the firmware data into the
+ * adapter. It assumes the length etc. to be allright.
 */
-static void
+static int
 spider_net_download_firmware(struct spider_net_card *card,
-			     const struct firmware *firmware)
+			     u8 *firmware_ptr)
 {
 	int sequencer, i;
-	u32 *fw_ptr = (u32 *)firmware->data;
+	u32 *fw_ptr = (u32 *)firmware_ptr;
 
 	/* stop sequencers */
 	spider_net_write_reg(card, SPIDER_NET_GSINIT,
 			     SPIDER_NET_STOP_SEQ_VALUE);
 
-	for (sequencer = 0; sequencer < 6; sequencer++) {
+	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+	     sequencer++) {
 		spider_net_write_reg(card,
 				     SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
-		for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
+		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
 					     sequencer * 8, *fw_ptr);
 			fw_ptr++;
 		}
 	}
 
+	if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
+		return -EIO;
+
 	spider_net_write_reg(card, SPIDER_NET_GSINIT,
 			     SPIDER_NET_RUN_SEQ_VALUE);
+
+	return 0;
 }
 
 /**
@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
 static int
 spider_net_init_firmware(struct spider_net_card *card)
 {
-	const struct firmware *firmware;
-	int err = -EIO;
+	struct firmware *firmware = NULL;
+	struct device_node *dn;
+	u8 *fw_prop = NULL;
+	int err = -ENOENT;
+	int fw_size;
+
+	if (request_firmware((const struct firmware **)&firmware,
+			     SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
+		if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
+		     netif_msg_probe(card) ) {
+			pr_err("Incorrect size of spidernet firmware in " \
+			       "filesystem. Looking in host firmware...\n");
+			goto try_host_fw;
+		}
+		err = spider_net_download_firmware(card, firmware->data);
 
-	if (request_firmware(&firmware,
-			     SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) {
-		if (netif_msg_probe(card))
-			pr_err("Couldn't read in sequencer data file %s.\n",
-			       SPIDER_NET_FIRMWARE_NAME);
-		firmware = NULL;
-		goto out;
-	}
+		release_firmware(firmware);
+		if (err)
+			goto try_host_fw;
 
-	if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) {
-		if (netif_msg_probe(card))
-			pr_err("Invalid size of sequencer data file %s.\n",
-			       SPIDER_NET_FIRMWARE_NAME);
-		goto out;
+		goto done;
 	}
 
-	spider_net_download_firmware(card, firmware);
+try_host_fw:
+	dn = pci_device_to_OF_node(card->pdev);
+	if (!dn)
+		goto out_err;
 
-	err = 0;
-out:
-	release_firmware(firmware);
+	fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
+	if (!fw_prop)
+		goto out_err;
+
+	if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
+	     netif_msg_probe(card) ) {
+		pr_err("Incorrect size of spidernet firmware in " \
+		       "host firmware\n");
+		goto done;
+	}
 
+	err = spider_net_download_firmware(card, fw_prop);
+
+done:
+	return err;
+out_err:
+	if (netif_msg_probe(card))
+		pr_err("Couldn't find spidernet firmware in filesystem " \
+		       "or host firmware\n");
 	return err;
 }
 
@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
 			     SPIDER_NET_CKRCTRL_RUN_VALUE);
 
 	/* empty sequencer data */
-	for (sequencer = 0; sequencer < 6; sequencer++) {
+	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
+	     sequencer++) {
 		spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
 				     sequencer * 8, 0x0);
-		for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
+		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
 			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
 					     sequencer * 8, 0x0);
 		}
@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
 	SET_NETDEV_DEV(netdev, &card->pdev->dev);
 
 	pci_set_drvdata(card->pdev, netdev);
-	spin_lock_init(&card->intmask_lock);
+
+	atomic_set(&card->tx_chain_release,0);
+	card->rxram_full_tl.data = (unsigned long) card;
+	card->rxram_full_tl.func =
+		(void (*)(unsigned long)) spider_net_handle_rxram_full;
+	init_timer(&card->tx_timer);
+	card->tx_timer.function =
+		(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
+	card->tx_timer.data = (unsigned long) card;
 	netdev->irq = card->pdev->irq;
 
 	card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;