-rw-r--r--  drivers/net/spider_net.c | 578
-rw-r--r--  drivers/net/spider_net.h |  73
2 files changed, 274 insertions(+), 377 deletions(-)
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 05b474f2c8b5..647f62e9707d 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
84 | * | 84 | * |
85 | * returns the content of the specified SMMIO register. | 85 | * returns the content of the specified SMMIO register. |
86 | */ | 86 | */ |
87 | static u32 | 87 | static inline u32 |
88 | spider_net_read_reg(struct spider_net_card *card, u32 reg) | 88 | spider_net_read_reg(struct spider_net_card *card, u32 reg) |
89 | { | 89 | { |
90 | u32 value; | 90 | u32 value; |
@@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
101 | * @reg: register to write to | 101 | * @reg: register to write to |
102 | * @value: value to write into the specified SMMIO register | 102 | * @value: value to write into the specified SMMIO register |
103 | */ | 103 | */ |
104 | static void | 104 | static inline void |
105 | spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) | 105 | spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) |
106 | { | 106 | { |
107 | value = cpu_to_le32(value); | 107 | value = cpu_to_le32(value); |
@@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev)
259 | * | 259 | * |
260 | * returns the status as in the dmac_cmd_status field of the descriptor | 260 | * returns the status as in the dmac_cmd_status field of the descriptor |
261 | */ | 261 | */ |
262 | static enum spider_net_descr_status | 262 | static inline int |
263 | spider_net_get_descr_status(struct spider_net_descr *descr) | 263 | spider_net_get_descr_status(struct spider_net_descr *descr) |
264 | { | 264 | { |
265 | u32 cmd_status; | 265 | return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK; |
266 | |||
267 | cmd_status = descr->dmac_cmd_status; | ||
268 | cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; | ||
269 | /* no need to mask out any bits, as cmd_status is 32 bits wide only | ||
270 | * (and unsigned) */ | ||
271 | return cmd_status; | ||
272 | } | ||
273 | |||
274 | /** | ||
275 | * spider_net_set_descr_status -- sets the status of a descriptor | ||
276 | * @descr: descriptor to change | ||
277 | * @status: status to set in the descriptor | ||
278 | * | ||
279 | * changes the status to the specified value. Doesn't change other bits | ||
280 | * in the status | ||
281 | */ | ||
282 | static void | ||
283 | spider_net_set_descr_status(struct spider_net_descr *descr, | ||
284 | enum spider_net_descr_status status) | ||
285 | { | ||
286 | u32 cmd_status; | ||
287 | /* read the status */ | ||
288 | cmd_status = descr->dmac_cmd_status; | ||
289 | /* clean the upper 4 bits */ | ||
290 | cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; | ||
291 | /* add the status to it */ | ||
292 | cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; | ||
293 | /* and write it back */ | ||
294 | descr->dmac_cmd_status = cmd_status; | ||
295 | } | 266 | } |
296 | 267 | ||
297 | /** | 268 | /** |
@@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card,
328 | static int | 299 | static int |
329 | spider_net_init_chain(struct spider_net_card *card, | 300 | spider_net_init_chain(struct spider_net_card *card, |
330 | struct spider_net_descr_chain *chain, | 301 | struct spider_net_descr_chain *chain, |
331 | struct spider_net_descr *start_descr, int no) | 302 | struct spider_net_descr *start_descr, |
303 | int direction, int no) | ||
332 | { | 304 | { |
333 | int i; | 305 | int i; |
334 | struct spider_net_descr *descr; | 306 | struct spider_net_descr *descr; |
335 | dma_addr_t buf; | 307 | dma_addr_t buf; |
336 | 308 | ||
337 | atomic_set(&card->rx_chain_refill,0); | ||
338 | |||
339 | descr = start_descr; | 309 | descr = start_descr; |
340 | memset(descr, 0, sizeof(*descr) * no); | 310 | memset(descr, 0, sizeof(*descr) * no); |
341 | 311 | ||
342 | /* set up the hardware pointers in each descriptor */ | 312 | /* set up the hardware pointers in each descriptor */ |
343 | for (i=0; i<no; i++, descr++) { | 313 | for (i=0; i<no; i++, descr++) { |
344 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 314 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
345 | 315 | ||
346 | buf = pci_map_single(card->pdev, descr, | 316 | buf = pci_map_single(card->pdev, descr, |
347 | SPIDER_NET_DESCR_SIZE, | 317 | SPIDER_NET_DESCR_SIZE, |
348 | PCI_DMA_BIDIRECTIONAL); | 318 | direction); |
349 | 319 | ||
350 | if (buf == DMA_ERROR_CODE) | 320 | if (buf == DMA_ERROR_CODE) |
351 | goto iommu_error; | 321 | goto iommu_error; |
@@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card,
360 | start_descr->prev = descr-1; | 330 | start_descr->prev = descr-1; |
361 | 331 | ||
362 | descr = start_descr; | 332 | descr = start_descr; |
363 | for (i=0; i < no; i++, descr++) { | 333 | if (direction == PCI_DMA_FROMDEVICE) |
364 | descr->next_descr_addr = descr->next->bus_addr; | 334 | for (i=0; i < no; i++, descr++) |
365 | } | 335 | descr->next_descr_addr = descr->next->bus_addr; |
366 | 336 | ||
337 | spin_lock_init(&chain->lock); | ||
367 | chain->head = start_descr; | 338 | chain->head = start_descr; |
368 | chain->tail = start_descr; | 339 | chain->tail = start_descr; |
369 | 340 | ||
@@ -375,7 +346,7 @@ iommu_error:
375 | if (descr->bus_addr) | 346 | if (descr->bus_addr) |
376 | pci_unmap_single(card->pdev, descr->bus_addr, | 347 | pci_unmap_single(card->pdev, descr->bus_addr, |
377 | SPIDER_NET_DESCR_SIZE, | 348 | SPIDER_NET_DESCR_SIZE, |
378 | PCI_DMA_BIDIRECTIONAL); | 349 | direction); |
379 | return -ENOMEM; | 350 | return -ENOMEM; |
380 | } | 351 | } |
381 | 352 | ||
@@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
396 | dev_kfree_skb(descr->skb); | 367 | dev_kfree_skb(descr->skb); |
397 | pci_unmap_single(card->pdev, descr->buf_addr, | 368 | pci_unmap_single(card->pdev, descr->buf_addr, |
398 | SPIDER_NET_MAX_FRAME, | 369 | SPIDER_NET_MAX_FRAME, |
399 | PCI_DMA_BIDIRECTIONAL); | 370 | PCI_DMA_FROMDEVICE); |
400 | } | 371 | } |
401 | descr = descr->next; | 372 | descr = descr->next; |
402 | } | 373 | } |
@@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
446 | skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); | 417 | skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); |
447 | /* io-mmu-map the skb */ | 418 | /* io-mmu-map the skb */ |
448 | buf = pci_map_single(card->pdev, descr->skb->data, | 419 | buf = pci_map_single(card->pdev, descr->skb->data, |
449 | SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); | 420 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
450 | descr->buf_addr = buf; | 421 | descr->buf_addr = buf; |
451 | if (buf == DMA_ERROR_CODE) { | 422 | if (buf == DMA_ERROR_CODE) { |
452 | dev_kfree_skb_any(descr->skb); | 423 | dev_kfree_skb_any(descr->skb); |
453 | if (netif_msg_rx_err(card) && net_ratelimit()) | 424 | if (netif_msg_rx_err(card) && net_ratelimit()) |
454 | pr_err("Could not iommu-map rx buffer\n"); | 425 | pr_err("Could not iommu-map rx buffer\n"); |
455 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 426 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
456 | } else { | 427 | } else { |
457 | descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED; | 428 | descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED | |
429 | SPIDER_NET_DMAC_NOINTR_COMPLETE; | ||
458 | } | 430 | } |
459 | 431 | ||
460 | return error; | 432 | return error; |
@@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
468 | * chip by writing to the appropriate register. DMA is enabled in | 440 | * chip by writing to the appropriate register. DMA is enabled in |
469 | * spider_net_enable_rxdmac. | 441 | * spider_net_enable_rxdmac. |
470 | */ | 442 | */ |
471 | static void | 443 | static inline void |
472 | spider_net_enable_rxchtails(struct spider_net_card *card) | 444 | spider_net_enable_rxchtails(struct spider_net_card *card) |
473 | { | 445 | { |
474 | /* assume chain is aligned correctly */ | 446 | /* assume chain is aligned correctly */ |
@@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
483 | * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN | 455 | * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN |
484 | * in the GDADMACCNTR register | 456 | * in the GDADMACCNTR register |
485 | */ | 457 | */ |
486 | static void | 458 | static inline void |
487 | spider_net_enable_rxdmac(struct spider_net_card *card) | 459 | spider_net_enable_rxdmac(struct spider_net_card *card) |
488 | { | 460 | { |
489 | wmb(); | 461 | wmb(); |
@@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
500 | static void | 472 | static void |
501 | spider_net_refill_rx_chain(struct spider_net_card *card) | 473 | spider_net_refill_rx_chain(struct spider_net_card *card) |
502 | { | 474 | { |
503 | struct spider_net_descr_chain *chain; | 475 | struct spider_net_descr_chain *chain = &card->rx_chain; |
504 | 476 | unsigned long flags; | |
505 | chain = &card->rx_chain; | ||
506 | 477 | ||
507 | /* one context doing the refill (and a second context seeing that | 478 | /* one context doing the refill (and a second context seeing that |
508 | * and omitting it) is ok. If called by NAPI, we'll be called again | 479 | * and omitting it) is ok. If called by NAPI, we'll be called again |
509 | * as spider_net_decode_one_descr is called several times. If some | 480 | * as spider_net_decode_one_descr is called several times. If some |
510 | * interrupt calls us, the NAPI is about to clean up anyway. */ | 481 | * interrupt calls us, the NAPI is about to clean up anyway. */ |
511 | if (atomic_inc_return(&card->rx_chain_refill) == 1) | 482 | if (!spin_trylock_irqsave(&chain->lock, flags)) |
512 | while (spider_net_get_descr_status(chain->head) == | 483 | return; |
513 | SPIDER_NET_DESCR_NOT_IN_USE) { | 484 | |
514 | if (spider_net_prepare_rx_descr(card, chain->head)) | 485 | while (spider_net_get_descr_status(chain->head) == |
515 | break; | 486 | SPIDER_NET_DESCR_NOT_IN_USE) { |
516 | chain->head = chain->head->next; | 487 | if (spider_net_prepare_rx_descr(card, chain->head)) |
517 | } | 488 | break; |
489 | chain->head = chain->head->next; | ||
490 | } | ||
518 | 491 | ||
519 | atomic_dec(&card->rx_chain_refill); | 492 | spin_unlock_irqrestore(&chain->lock, flags); |
520 | } | 493 | } |
521 | 494 | ||
522 | /** | 495 | /** |
@@ -554,111 +527,6 @@ error:
554 | } | 527 | } |
555 | 528 | ||
556 | /** | 529 | /** |
557 | * spider_net_release_tx_descr - processes a used tx descriptor | ||
558 | * @card: card structure | ||
559 | * @descr: descriptor to release | ||
560 | * | ||
561 | * releases a used tx descriptor (unmapping, freeing of skb) | ||
562 | */ | ||
563 | static void | ||
564 | spider_net_release_tx_descr(struct spider_net_card *card, | ||
565 | struct spider_net_descr *descr) | ||
566 | { | ||
567 | struct sk_buff *skb; | ||
568 | |||
569 | /* unmap the skb */ | ||
570 | skb = descr->skb; | ||
571 | pci_unmap_single(card->pdev, descr->buf_addr, skb->len, | ||
572 | PCI_DMA_BIDIRECTIONAL); | ||
573 | |||
574 | dev_kfree_skb_any(skb); | ||
575 | |||
576 | /* set status to not used */ | ||
577 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * spider_net_release_tx_chain - processes sent tx descriptors | ||
582 | * @card: adapter structure | ||
583 | * @brutal: if set, don't care about whether descriptor seems to be in use | ||
584 | * | ||
585 | * returns 0 if the tx ring is empty, otherwise 1. | ||
586 | * | ||
587 | * spider_net_release_tx_chain releases the tx descriptors that spider has | ||
588 | * finished with (if non-brutal) or simply release tx descriptors (if brutal). | ||
589 | * If some other context is calling this function, we return 1 so that we're | ||
590 | * scheduled again (if we were scheduled) and will not loose initiative. | ||
591 | */ | ||
592 | static int | ||
593 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | ||
594 | { | ||
595 | struct spider_net_descr_chain *tx_chain = &card->tx_chain; | ||
596 | enum spider_net_descr_status status; | ||
597 | |||
598 | if (atomic_inc_return(&card->tx_chain_release) != 1) { | ||
599 | atomic_dec(&card->tx_chain_release); | ||
600 | return 1; | ||
601 | } | ||
602 | |||
603 | for (;;) { | ||
604 | status = spider_net_get_descr_status(tx_chain->tail); | ||
605 | switch (status) { | ||
606 | case SPIDER_NET_DESCR_CARDOWNED: | ||
607 | if (!brutal) | ||
608 | goto out; | ||
609 | /* fallthrough, if we release the descriptors | ||
610 | * brutally (then we don't care about | ||
611 | * SPIDER_NET_DESCR_CARDOWNED) */ | ||
612 | case SPIDER_NET_DESCR_RESPONSE_ERROR: | ||
613 | case SPIDER_NET_DESCR_PROTECTION_ERROR: | ||
614 | case SPIDER_NET_DESCR_FORCE_END: | ||
615 | if (netif_msg_tx_err(card)) | ||
616 | pr_err("%s: forcing end of tx descriptor " | ||
617 | "with status x%02x\n", | ||
618 | card->netdev->name, status); | ||
619 | card->netdev_stats.tx_dropped++; | ||
620 | break; | ||
621 | |||
622 | case SPIDER_NET_DESCR_COMPLETE: | ||
623 | card->netdev_stats.tx_packets++; | ||
624 | card->netdev_stats.tx_bytes += | ||
625 | tx_chain->tail->skb->len; | ||
626 | break; | ||
627 | |||
628 | default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */ | ||
629 | goto out; | ||
630 | } | ||
631 | spider_net_release_tx_descr(card, tx_chain->tail); | ||
632 | tx_chain->tail = tx_chain->tail->next; | ||
633 | } | ||
634 | out: | ||
635 | atomic_dec(&card->tx_chain_release); | ||
636 | |||
637 | netif_wake_queue(card->netdev); | ||
638 | |||
639 | if (status == SPIDER_NET_DESCR_CARDOWNED) | ||
640 | return 1; | ||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * spider_net_cleanup_tx_ring - cleans up the TX ring | ||
646 | * @card: card structure | ||
647 | * | ||
648 | * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use | ||
649 | * interrupts to cleanup our TX ring) and returns sent packets to the stack | ||
650 | * by freeing them | ||
651 | */ | ||
652 | static void | ||
653 | spider_net_cleanup_tx_ring(struct spider_net_card *card) | ||
654 | { | ||
655 | if ( (spider_net_release_tx_chain(card, 0)) && | ||
656 | (card->netdev->flags & IFF_UP) ) { | ||
657 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | ||
658 | } | ||
659 | } | ||
660 | |||
661 | /** | ||
662 | * spider_net_get_multicast_hash - generates hash for multicast filter table | 530 | * spider_net_get_multicast_hash - generates hash for multicast filter table |
663 | * @addr: multicast address | 531 | * @addr: multicast address |
664 | * | 532 | * |
@@ -761,97 +629,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card)
761 | } | 629 | } |
762 | 630 | ||
763 | /** | 631 | /** |
764 | * spider_net_stop - called upon ifconfig down | ||
765 | * @netdev: interface device structure | ||
766 | * | ||
767 | * always returns 0 | ||
768 | */ | ||
769 | int | ||
770 | spider_net_stop(struct net_device *netdev) | ||
771 | { | ||
772 | struct spider_net_card *card = netdev_priv(netdev); | ||
773 | |||
774 | tasklet_kill(&card->rxram_full_tl); | ||
775 | netif_poll_disable(netdev); | ||
776 | netif_carrier_off(netdev); | ||
777 | netif_stop_queue(netdev); | ||
778 | del_timer_sync(&card->tx_timer); | ||
779 | |||
780 | /* disable/mask all interrupts */ | ||
781 | spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); | ||
782 | spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); | ||
783 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); | ||
784 | |||
785 | /* free_irq(netdev->irq, netdev);*/ | ||
786 | free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); | ||
787 | |||
788 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
789 | SPIDER_NET_DMA_TX_FEND_VALUE); | ||
790 | |||
791 | /* turn off DMA, force end */ | ||
792 | spider_net_disable_rxdmac(card); | ||
793 | |||
794 | /* release chains */ | ||
795 | spider_net_release_tx_chain(card, 1); | ||
796 | |||
797 | spider_net_free_chain(card, &card->tx_chain); | ||
798 | spider_net_free_chain(card, &card->rx_chain); | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | * spider_net_get_next_tx_descr - returns the next available tx descriptor | ||
805 | * @card: device structure to get descriptor from | ||
806 | * | ||
807 | * returns the address of the next descriptor, or NULL if not available. | ||
808 | */ | ||
809 | static struct spider_net_descr * | ||
810 | spider_net_get_next_tx_descr(struct spider_net_card *card) | ||
811 | { | ||
812 | /* check, if head points to not-in-use descr */ | ||
813 | if ( spider_net_get_descr_status(card->tx_chain.head) == | ||
814 | SPIDER_NET_DESCR_NOT_IN_USE ) { | ||
815 | return card->tx_chain.head; | ||
816 | } else { | ||
817 | return NULL; | ||
818 | } | ||
819 | } | ||
820 | |||
821 | /** | ||
822 | * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field | ||
823 | * @descr: descriptor structure to fill out | ||
824 | * @skb: packet to consider | ||
825 | * | ||
826 | * fills out the command and status field of the descriptor structure, | ||
827 | * depending on hardware checksum settings. | ||
828 | */ | ||
829 | static void | ||
830 | spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, | ||
831 | struct sk_buff *skb) | ||
832 | { | ||
833 | /* make sure the other fields in the descriptor are written */ | ||
834 | wmb(); | ||
835 | |||
836 | if (skb->ip_summed != CHECKSUM_HW) { | ||
837 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; | ||
838 | return; | ||
839 | } | ||
840 | |||
841 | /* is packet ip? | ||
842 | * if yes: tcp? udp? */ | ||
843 | if (skb->protocol == htons(ETH_P_IP)) { | ||
844 | if (skb->nh.iph->protocol == IPPROTO_TCP) | ||
845 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; | ||
846 | else if (skb->nh.iph->protocol == IPPROTO_UDP) | ||
847 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; | ||
848 | else /* the stack should checksum non-tcp and non-udp | ||
849 | packets on his own: NETIF_F_IP_CSUM */ | ||
850 | descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; | ||
851 | } | ||
852 | } | ||
853 | |||
854 | /** | ||
855 | * spider_net_prepare_tx_descr - fill tx descriptor with skb data | 632 | * spider_net_prepare_tx_descr - fill tx descriptor with skb data |
856 | * @card: card structure | 633 | * @card: card structure |
857 | * @descr: descriptor structure to fill out | 634 | * @descr: descriptor structure to fill out |
@@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
864 | */ | 641 | */ |
865 | static int | 642 | static int |
866 | spider_net_prepare_tx_descr(struct spider_net_card *card, | 643 | spider_net_prepare_tx_descr(struct spider_net_card *card, |
867 | struct spider_net_descr *descr, | ||
868 | struct sk_buff *skb) | 644 | struct sk_buff *skb) |
869 | { | 645 | { |
646 | struct spider_net_descr *descr = card->tx_chain.head; | ||
870 | dma_addr_t buf; | 647 | dma_addr_t buf; |
871 | 648 | ||
872 | buf = pci_map_single(card->pdev, skb->data, | 649 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); |
873 | skb->len, PCI_DMA_BIDIRECTIONAL); | ||
874 | if (buf == DMA_ERROR_CODE) { | 650 | if (buf == DMA_ERROR_CODE) { |
875 | if (netif_msg_tx_err(card) && net_ratelimit()) | 651 | if (netif_msg_tx_err(card) && net_ratelimit()) |
876 | pr_err("could not iommu-map packet (%p, %i). " | 652 | pr_err("could not iommu-map packet (%p, %i). " |
@@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
880 | 656 | ||
881 | descr->buf_addr = buf; | 657 | descr->buf_addr = buf; |
882 | descr->buf_size = skb->len; | 658 | descr->buf_size = skb->len; |
659 | descr->next_descr_addr = 0; | ||
883 | descr->skb = skb; | 660 | descr->skb = skb; |
884 | descr->data_status = 0; | 661 | descr->data_status = 0; |
885 | 662 | ||
886 | spider_net_set_txdescr_cmdstat(descr,skb); | 663 | descr->dmac_cmd_status = |
664 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; | ||
665 | if (skb->protocol == htons(ETH_P_IP)) | ||
666 | switch (skb->nh.iph->protocol) { | ||
667 | case IPPROTO_TCP: | ||
668 | descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP; | ||
669 | break; | ||
670 | case IPPROTO_UDP: | ||
671 | descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP; | ||
672 | break; | ||
673 | } | ||
674 | |||
675 | descr->prev->next_descr_addr = descr->bus_addr; | ||
676 | |||
677 | return 0; | ||
678 | } | ||
679 | |||
680 | /** | ||
681 | * spider_net_release_tx_descr - processes a used tx descriptor | ||
682 | * @card: card structure | ||
683 | * @descr: descriptor to release | ||
684 | * | ||
685 | * releases a used tx descriptor (unmapping, freeing of skb) | ||
686 | */ | ||
687 | static inline void | ||
688 | spider_net_release_tx_descr(struct spider_net_card *card) | ||
689 | { | ||
690 | struct spider_net_descr *descr = card->tx_chain.tail; | ||
691 | struct sk_buff *skb; | ||
692 | |||
693 | card->tx_chain.tail = card->tx_chain.tail->next; | ||
694 | descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; | ||
695 | |||
696 | /* unmap the skb */ | ||
697 | skb = descr->skb; | ||
698 | pci_unmap_single(card->pdev, descr->buf_addr, skb->len, | ||
699 | PCI_DMA_TODEVICE); | ||
700 | dev_kfree_skb_any(skb); | ||
701 | } | ||
702 | |||
703 | /** | ||
704 | * spider_net_release_tx_chain - processes sent tx descriptors | ||
705 | * @card: adapter structure | ||
706 | * @brutal: if set, don't care about whether descriptor seems to be in use | ||
707 | * | ||
708 | * returns 0 if the tx ring is empty, otherwise 1. | ||
709 | * | ||
710 | * spider_net_release_tx_chain releases the tx descriptors that spider has | ||
711 | * finished with (if non-brutal) or simply release tx descriptors (if brutal). | ||
712 | * If some other context is calling this function, we return 1 so that we're | ||
713 | * scheduled again (if we were scheduled) and will not loose initiative. | ||
714 | */ | ||
715 | static int | ||
716 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | ||
717 | { | ||
718 | struct spider_net_descr_chain *chain = &card->tx_chain; | ||
719 | int status; | ||
720 | |||
721 | spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR); | ||
722 | |||
723 | while (chain->tail != chain->head) { | ||
724 | status = spider_net_get_descr_status(chain->tail); | ||
725 | switch (status) { | ||
726 | case SPIDER_NET_DESCR_COMPLETE: | ||
727 | card->netdev_stats.tx_packets++; | ||
728 | card->netdev_stats.tx_bytes += chain->tail->skb->len; | ||
729 | break; | ||
730 | |||
731 | case SPIDER_NET_DESCR_CARDOWNED: | ||
732 | if (!brutal) | ||
733 | return 1; | ||
734 | /* fallthrough, if we release the descriptors | ||
735 | * brutally (then we don't care about | ||
736 | * SPIDER_NET_DESCR_CARDOWNED) */ | ||
737 | |||
738 | case SPIDER_NET_DESCR_RESPONSE_ERROR: | ||
739 | case SPIDER_NET_DESCR_PROTECTION_ERROR: | ||
740 | case SPIDER_NET_DESCR_FORCE_END: | ||
741 | if (netif_msg_tx_err(card)) | ||
742 | pr_err("%s: forcing end of tx descriptor " | ||
743 | "with status x%02x\n", | ||
744 | card->netdev->name, status); | ||
745 | card->netdev_stats.tx_errors++; | ||
746 | break; | ||
747 | |||
748 | default: | ||
749 | card->netdev_stats.tx_dropped++; | ||
750 | return 1; | ||
751 | } | ||
752 | spider_net_release_tx_descr(card); | ||
753 | } | ||
887 | 754 | ||
888 | return 0; | 755 | return 0; |
889 | } | 756 | } |
@@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
896 | * spider_net_kick_tx_dma writes the current tx chain head as start address | 763 | * spider_net_kick_tx_dma writes the current tx chain head as start address |
897 | * of the tx descriptor chain and enables the transmission DMA engine | 764 | * of the tx descriptor chain and enables the transmission DMA engine |
898 | */ | 765 | */ |
899 | static void | 766 | static inline void |
900 | spider_net_kick_tx_dma(struct spider_net_card *card, | 767 | spider_net_kick_tx_dma(struct spider_net_card *card) |
901 | struct spider_net_descr *descr) | ||
902 | { | 768 | { |
903 | /* this is the only descriptor in the output chain. | 769 | struct spider_net_descr *descr; |
904 | * Enable TX DMA */ | ||
905 | 770 | ||
906 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | 771 | if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) & |
907 | descr->bus_addr); | 772 | SPIDER_NET_TX_DMA_EN) |
773 | goto out; | ||
908 | 774 | ||
909 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | 775 | descr = card->tx_chain.tail; |
910 | SPIDER_NET_DMA_TX_VALUE); | 776 | for (;;) { |
777 | if (spider_net_get_descr_status(descr) == | ||
778 | SPIDER_NET_DESCR_CARDOWNED) { | ||
779 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | ||
780 | descr->bus_addr); | ||
781 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
782 | SPIDER_NET_DMA_TX_VALUE); | ||
783 | break; | ||
784 | } | ||
785 | if (descr == card->tx_chain.head) | ||
786 | break; | ||
787 | descr = descr->next; | ||
788 | } | ||
789 | |||
790 | out: | ||
791 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | ||
911 | } | 792 | } |
912 | 793 | ||
913 | /** | 794 | /** |
@@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card,
915 | * @skb: packet to send out | 796 | * @skb: packet to send out |
916 | * @netdev: interface device structure | 797 | * @netdev: interface device structure |
917 | * | 798 | * |
918 | * returns 0 on success, <0 on failure | 799 | * returns 0 on success, !0 on failure |
919 | */ | 800 | */ |
920 | static int | 801 | static int |
921 | spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) | 802 | spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) |
922 | { | 803 | { |
923 | struct spider_net_card *card = netdev_priv(netdev); | 804 | struct spider_net_card *card = netdev_priv(netdev); |
924 | struct spider_net_descr *descr; | 805 | struct spider_net_descr_chain *chain = &card->tx_chain; |
806 | struct spider_net_descr *descr = chain->head; | ||
807 | unsigned long flags; | ||
925 | int result; | 808 | int result; |
926 | 809 | ||
810 | spin_lock_irqsave(&chain->lock, flags); | ||
811 | |||
927 | spider_net_release_tx_chain(card, 0); | 812 | spider_net_release_tx_chain(card, 0); |
928 | 813 | ||
929 | descr = spider_net_get_next_tx_descr(card); | 814 | if (chain->head->next == chain->tail->prev) { |
815 | card->netdev_stats.tx_dropped++; | ||
816 | result = NETDEV_TX_LOCKED; | ||
817 | goto out; | ||
818 | } | ||
930 | 819 | ||
931 | if (!descr) | 820 | if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) { |
932 | goto error; | 821 | result = NETDEV_TX_LOCKED; |
822 | goto out; | ||
823 | } | ||
933 | 824 | ||
934 | result = spider_net_prepare_tx_descr(card, descr, skb); | 825 | if (spider_net_prepare_tx_descr(card, skb) != 0) { |
935 | if (result) | 826 | card->netdev_stats.tx_dropped++; |
936 | goto error; | 827 | result = NETDEV_TX_BUSY; |
828 | goto out; | ||
829 | } | ||
830 | |||
831 | result = NETDEV_TX_OK; | ||
937 | 832 | ||
833 | spider_net_kick_tx_dma(card); | ||
938 | card->tx_chain.head = card->tx_chain.head->next; | 834 | card->tx_chain.head = card->tx_chain.head->next; |
939 | 835 | ||
940 | if (spider_net_get_descr_status(descr->prev) != | 836 | out: |
941 | SPIDER_NET_DESCR_CARDOWNED) { | 837 | spin_unlock_irqrestore(&chain->lock, flags); |
942 | /* make sure the current descriptor is in memory. Then | 838 | netif_wake_queue(netdev); |
943 | * kicking it on again makes sense, if the previous is not | 839 | return result; |
944 | * card-owned anymore. Check the previous descriptor twice | 840 | } |
945 | * to omit an mb() in heavy traffic cases */ | ||
946 | mb(); | ||
947 | if (spider_net_get_descr_status(descr->prev) != | ||
948 | SPIDER_NET_DESCR_CARDOWNED) | ||
949 | spider_net_kick_tx_dma(card, descr); | ||
950 | } | ||
951 | 841 | ||
952 | mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER); | 842 | /** |
843 | * spider_net_cleanup_tx_ring - cleans up the TX ring | ||
844 | * @card: card structure | ||
845 | * | ||
846 | * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use | ||
847 | * interrupts to cleanup our TX ring) and returns sent packets to the stack | ||
848 | * by freeing them | ||
849 | */ | ||
850 | static void | ||
851 | spider_net_cleanup_tx_ring(struct spider_net_card *card) | ||
852 | { | ||
853 | unsigned long flags; | ||
953 | 854 | ||
954 | return NETDEV_TX_OK; | 855 | spin_lock_irqsave(&card->tx_chain.lock, flags); |
955 | 856 | ||
956 | error: | 857 | if ((spider_net_release_tx_chain(card, 0) != 0) && |
957 | card->netdev_stats.tx_dropped++; | 858 | (card->netdev->flags & IFF_UP)) |
958 | return NETDEV_TX_BUSY; | 859 | spider_net_kick_tx_dma(card); |
860 | |||
861 | spin_unlock_irqrestore(&card->tx_chain.lock, flags); | ||
959 | } | 862 | } |
960 | 863 | ||
961 | /** | 864 | /** |
@@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1002 | 905 | ||
1003 | /* unmap descriptor */ | 906 | /* unmap descriptor */ |
1004 | pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, | 907 | pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME, |
1005 | PCI_DMA_BIDIRECTIONAL); | 908 | PCI_DMA_FROMDEVICE); |
1006 | 909 | ||
1007 | /* the cases we'll throw away the packet immediately */ | 910 | /* the cases we'll throw away the packet immediately */ |
1008 | if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { | 911 | if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) { |
@@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1067 | static int | 970 | static int |
1068 | spider_net_decode_one_descr(struct spider_net_card *card, int napi) | 971 | spider_net_decode_one_descr(struct spider_net_card *card, int napi) |
1069 | { | 972 | { |
1070 | enum spider_net_descr_status status; | 973 | struct spider_net_descr_chain *chain = &card->rx_chain; |
1071 | struct spider_net_descr *descr; | 974 | struct spider_net_descr *descr = chain->tail; |
1072 | struct spider_net_descr_chain *chain; | 975 | int status; |
1073 | int result; | 976 | int result; |
1074 | 977 | ||
1075 | chain = &card->rx_chain; | ||
1076 | descr = chain->tail; | ||
1077 | |||
1078 | status = spider_net_get_descr_status(descr); | 978 | status = spider_net_get_descr_status(descr); |
1079 | 979 | ||
1080 | if (status == SPIDER_NET_DESCR_CARDOWNED) { | 980 | if (status == SPIDER_NET_DESCR_CARDOWNED) { |
@@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1103 | card->netdev->name, status); | 1003 | card->netdev->name, status); |
1104 | card->netdev_stats.rx_dropped++; | 1004 | card->netdev_stats.rx_dropped++; |
1105 | pci_unmap_single(card->pdev, descr->buf_addr, | 1005 | pci_unmap_single(card->pdev, descr->buf_addr, |
1106 | SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL); | 1006 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
1107 | dev_kfree_skb_irq(descr->skb); | 1007 | dev_kfree_skb_irq(descr->skb); |
1108 | goto refill; | 1008 | goto refill; |
1109 | } | 1009 | } |
@@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
1119 | /* ok, we've got a packet in descr */ | 1019 | /* ok, we've got a packet in descr */ |
1120 | result = spider_net_pass_skb_up(descr, card, napi); | 1020 | result = spider_net_pass_skb_up(descr, card, napi); |
1121 | refill: | 1021 | refill: |
1122 | spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); | 1022 | descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; |
1123 | /* change the descriptor state: */ | 1023 | /* change the descriptor state: */ |
1124 | if (!napi) | 1024 | if (!napi) |
1125 | spider_net_refill_rx_chain(card); | 1025 | spider_net_refill_rx_chain(card); |
@@ -1291,21 +1191,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
1291 | } | 1191 | } |
1292 | 1192 | ||
1293 | /** | 1193 | /** |
1294 | * spider_net_enable_txdmac - enables a TX DMA controller | ||
1295 | * @card: card structure | ||
1296 | * | ||
1297 | * spider_net_enable_txdmac enables the TX DMA controller by setting the | ||
1298 | * descriptor chain tail address | ||
1299 | */ | ||
1300 | static void | ||
1301 | spider_net_enable_txdmac(struct spider_net_card *card) | ||
1302 | { | ||
1303 | /* assume chain is aligned correctly */ | ||
1304 | spider_net_write_reg(card, SPIDER_NET_GDTDCHA, | ||
1305 | card->tx_chain.tail->bus_addr); | ||
1306 | } | ||
1307 | |||
1308 | /** | ||
1309 | * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt | 1194 | * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt |
1310 | * @card: card structure | 1195 | * @card: card structure |
1311 | * | 1196 | * |
@@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card)
1653 | { SPIDER_NET_GMRWOLCTRL, 0 }, | 1538 | { SPIDER_NET_GMRWOLCTRL, 0 }, |
1654 | { SPIDER_NET_GTESTMD, 0x10000000 }, | 1539 | { SPIDER_NET_GTESTMD, 0x10000000 }, |
1655 | { SPIDER_NET_GTTQMSK, 0x00400040 }, | 1540 | { SPIDER_NET_GTTQMSK, 0x00400040 }, |
1656 | { SPIDER_NET_GTESTMD, 0 }, | ||
1657 | 1541 | ||
1658 | { SPIDER_NET_GMACINTEN, 0 }, | 1542 | { SPIDER_NET_GMACINTEN, 0 }, |
1659 | 1543 | ||
@@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card)
1692 | 1576 | ||
1693 | spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); | 1577 | spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE); |
1694 | 1578 | ||
1695 | /* set chain tail adress for TX chain */ | ||
1696 | spider_net_enable_txdmac(card); | ||
1697 | |||
1698 | spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, | 1579 | spider_net_write_reg(card, SPIDER_NET_GMACLENLMT, |
1699 | SPIDER_NET_LENLMT_VALUE); | 1580 | SPIDER_NET_LENLMT_VALUE); |
1700 | spider_net_write_reg(card, SPIDER_NET_GMACMODE, | 1581 | spider_net_write_reg(card, SPIDER_NET_GMACMODE, |
@@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card)
1709 | SPIDER_NET_INT1_MASK_VALUE); | 1590 | SPIDER_NET_INT1_MASK_VALUE); |
1710 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, | 1591 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, |
1711 | SPIDER_NET_INT2_MASK_VALUE); | 1592 | SPIDER_NET_INT2_MASK_VALUE); |
1593 | |||
1594 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
1595 | SPIDER_NET_GDTDCEIDIS); | ||
1712 | } | 1596 | } |
1713 | 1597 | ||
1714 | /** | 1598 | /** |
@@ -1728,10 +1612,12 @@ spider_net_open(struct net_device *netdev)
1728 | 1612 | ||
1729 | result = -ENOMEM; | 1613 | result = -ENOMEM; |
1730 | if (spider_net_init_chain(card, &card->tx_chain, | 1614 | if (spider_net_init_chain(card, &card->tx_chain, |
1731 | card->descr, tx_descriptors)) | 1615 | card->descr, |
1616 | PCI_DMA_TODEVICE, tx_descriptors)) | ||
1732 | goto alloc_tx_failed; | 1617 | goto alloc_tx_failed; |
1733 | if (spider_net_init_chain(card, &card->rx_chain, | 1618 | if (spider_net_init_chain(card, &card->rx_chain, |
1734 | card->descr + tx_descriptors, rx_descriptors)) | 1619 | card->descr + tx_descriptors, |
1620 | PCI_DMA_FROMDEVICE, rx_descriptors)) | ||
1735 | goto alloc_rx_failed; | 1621 | goto alloc_rx_failed; |
1736 | 1622 | ||
1737 | /* allocate rx skbs */ | 1623 | /* allocate rx skbs */ |
@@ -1955,6 +1841,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
1955 | } | 1841 | } |
1956 | 1842 | ||
1957 | /** | 1843 | /** |
1844 | * spider_net_stop - called upon ifconfig down | ||
1845 | * @netdev: interface device structure | ||
1846 | * | ||
1847 | * always returns 0 | ||
1848 | */ | ||
1849 | int | ||
1850 | spider_net_stop(struct net_device *netdev) | ||
1851 | { | ||
1852 | struct spider_net_card *card = netdev_priv(netdev); | ||
1853 | |||
1854 | tasklet_kill(&card->rxram_full_tl); | ||
1855 | netif_poll_disable(netdev); | ||
1856 | netif_carrier_off(netdev); | ||
1857 | netif_stop_queue(netdev); | ||
1858 | del_timer_sync(&card->tx_timer); | ||
1859 | |||
1860 | /* disable/mask all interrupts */ | ||
1861 | spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); | ||
1862 | spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0); | ||
1863 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); | ||
1864 | |||
1865 | /* free_irq(netdev->irq, netdev);*/ | ||
1866 | free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); | ||
1867 | |||
1868 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | ||
1869 | SPIDER_NET_DMA_TX_FEND_VALUE); | ||
1870 | |||
1871 | /* turn off DMA, force end */ | ||
1872 | spider_net_disable_rxdmac(card); | ||
1873 | |||
1874 | /* release chains */ | ||
1875 | if (spin_trylock(&card->tx_chain.lock)) { | ||
1876 | spider_net_release_tx_chain(card, 1); | ||
1877 | spin_unlock(&card->tx_chain.lock); | ||
1878 | } | ||
1879 | |||
1880 | spider_net_free_chain(card, &card->tx_chain); | ||
1881 | spider_net_free_chain(card, &card->rx_chain); | ||
1882 | |||
1883 | return 0; | ||
1884 | } | ||
1885 | |||
1886 | /** | ||
1958 | * spider_net_tx_timeout_task - task scheduled by the watchdog timeout | 1887 | * spider_net_tx_timeout_task - task scheduled by the watchdog timeout |
1959 | * function (to be called not under interrupt status) | 1888 | * function (to be called not under interrupt status) |
1960 | * @data: data, is interface device structure | 1889 | * @data: data, is interface device structure |
@@ -1982,7 +1911,7 @@ spider_net_tx_timeout_task(void *data)
1982 | goto out; | 1911 | goto out; |
1983 | 1912 | ||
1984 | spider_net_open(netdev); | 1913 | spider_net_open(netdev); |
1985 | spider_net_kick_tx_dma(card, card->tx_chain.head); | 1914 | spider_net_kick_tx_dma(card); |
1986 | netif_device_attach(netdev); | 1915 | netif_device_attach(netdev); |
1987 | 1916 | ||
1988 | out: | 1917 | out: |
@@ -2065,7 +1994,6 @@ spider_net_setup_netdev(struct spider_net_card *card)
2065 | 1994 | ||
2066 | pci_set_drvdata(card->pdev, netdev); | 1995 | pci_set_drvdata(card->pdev, netdev); |
2067 | 1996 | ||
2068 | atomic_set(&card->tx_chain_release,0); | ||
2069 | card->rxram_full_tl.data = (unsigned long) card; | 1997 | card->rxram_full_tl.data = (unsigned long) card; |
2070 | card->rxram_full_tl.func = | 1998 | card->rxram_full_tl.func = |
2071 | (void (*)(unsigned long)) spider_net_handle_rxram_full; | 1999 | (void (*)(unsigned long)) spider_net_handle_rxram_full; |
@@ -2079,7 +2007,7 @@ spider_net_setup_netdev(struct spider_net_card *card)
2079 | 2007 | ||
2080 | spider_net_setup_netdev_ops(netdev); | 2008 | spider_net_setup_netdev_ops(netdev); |
2081 | 2009 | ||
2082 | netdev->features = NETIF_F_HW_CSUM; | 2010 | netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; |
2083 | /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | | 2011 | /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | |
2084 | * NETIF_F_HW_VLAN_FILTER */ | 2012 | * NETIF_F_HW_VLAN_FILTER */ |
2085 | 2013 | ||
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 3b8d951cf73c..f6dcf180ae3d 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -208,7 +208,10 @@ extern char spider_net_driver_name[];
208 | #define SPIDER_NET_DMA_RX_VALUE 0x80000000 | 208 | #define SPIDER_NET_DMA_RX_VALUE 0x80000000 |
209 | #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 | 209 | #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003 |
210 | /* to set TX_DMA_EN */ | 210 | /* to set TX_DMA_EN */ |
211 | #define SPIDER_NET_DMA_TX_VALUE 0x80000000 | 211 | #define SPIDER_NET_TX_DMA_EN 0x80000000 |
212 | #define SPIDER_NET_GDTDCEIDIS 0x00000002 | ||
213 | #define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \ | ||
214 | SPIDER_NET_GDTDCEIDIS | ||
212 | #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 | 215 | #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003 |
213 | 216 | ||
214 | /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ | 217 | /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */ |
@@ -329,55 +332,23 @@ enum spider_net_int2_status {
329 | (~SPIDER_NET_TXINT) & \ | 332 | (~SPIDER_NET_TXINT) & \ |
330 | (~SPIDER_NET_RXINT) ) | 333 | (~SPIDER_NET_RXINT) ) |
331 | 334 | ||
332 | #define SPIDER_NET_GPREXEC 0x80000000 | 335 | #define SPIDER_NET_GPREXEC 0x80000000 |
333 | #define SPIDER_NET_GPRDAT_MASK 0x0000ffff | 336 | #define SPIDER_NET_GPRDAT_MASK 0x0000ffff |
334 | 337 | ||
335 | /* descriptor bits | 338 | #define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000 |
336 | * | 339 | #define SPIDER_NET_DMAC_NOCS 0x00040000 |
337 | * 1010 descriptor ready | 340 | #define SPIDER_NET_DMAC_TCP 0x00020000 |
338 | * 0 descr in middle of chain | 341 | #define SPIDER_NET_DMAC_UDP 0x00030000 |
339 | * 000 fixed to 0 | 342 | #define SPIDER_NET_TXDCEST 0x08000000 |
340 | * | 343 | |
341 | * 0 no interrupt on completion | 344 | #define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000 |
342 | * 000 fixed to 0 | 345 | #define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */ |
343 | * 1 no ipsec processing | 346 | #define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */ |
344 | * 1 last descriptor for this frame | 347 | #define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */ |
345 | * 00 no checksum | 348 | #define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */ |
346 | * 10 tcp checksum | 349 | #define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ |
347 | * 11 udp checksum | 350 | #define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ |
348 | * | 351 | #define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 |
349 | * 00 fixed to 0 | ||
350 | * 0 fixed to 0 | ||
351 | * 0 no interrupt on response errors | ||
352 | * 0 no interrupt on invalid descr | ||
353 | * 0 no interrupt on dma process termination | ||
354 | * 0 no interrupt on descr chain end | ||
355 | * 0 no interrupt on descr complete | ||
356 | * | ||
357 | * 000 fixed to 0 | ||
358 | * 0 response error interrupt status | ||
359 | * 0 invalid descr status | ||
360 | * 0 dma termination status | ||
361 | * 0 descr chain end status | ||
362 | * 0 descr complete status */ | ||
363 | #define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000 | ||
364 | #define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000 | ||
365 | #define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000 | ||
366 | #define SPIDER_NET_DESCR_IND_PROC_SHIFT 28 | ||
367 | #define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff | ||
368 | |||
369 | /* descr ready, descr is in middle of chain, get interrupt on completion */ | ||
370 | #define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 | ||
371 | |||
372 | enum spider_net_descr_status { | ||
373 | SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ | ||
374 | SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ | ||
375 | SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */ | ||
376 | SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */ | ||
377 | SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */ | ||
378 | SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */ | ||
379 | SPIDER_NET_DESCR_NOT_IN_USE /* any other value */ | ||
380 | }; | ||
381 | 352 | ||
382 | struct spider_net_descr { | 353 | struct spider_net_descr { |
383 | /* as defined by the hardware */ | 354 | /* as defined by the hardware */ |
@@ -398,7 +369,7 @@ struct spider_net_descr {
398 | } __attribute__((aligned(32))); | 369 | } __attribute__((aligned(32))); |
399 | 370 | ||
400 | struct spider_net_descr_chain { | 371 | struct spider_net_descr_chain { |
401 | /* we walk from tail to head */ | 372 | spinlock_t lock; |
402 | struct spider_net_descr *head; | 373 | struct spider_net_descr *head; |
403 | struct spider_net_descr *tail; | 374 | struct spider_net_descr *tail; |
404 | }; | 375 | }; |
@@ -453,8 +424,6 @@ struct spider_net_card {
453 | 424 | ||
454 | struct spider_net_descr_chain tx_chain; | 425 | struct spider_net_descr_chain tx_chain; |
455 | struct spider_net_descr_chain rx_chain; | 426 | struct spider_net_descr_chain rx_chain; |
456 | atomic_t rx_chain_refill; | ||
457 | atomic_t tx_chain_release; | ||
458 | 427 | ||
459 | struct net_device_stats netdev_stats; | 428 | struct net_device_stats netdev_stats; |
460 | 429 | ||