diff options
author | Linas Vepstas <linas@austin.ibm.com> | 2006-10-10 17:14:29 -0400 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2006-10-11 04:04:26 -0400 |
commit | 9cc7bf7edf50a8a6b456b337aff97fe780ae369b (patch) | |
tree | 9512ef8f08a21ab7fa9398fdd31f1ad1810a4702 /drivers/net/spider_net.c | |
parent | 68a8c609b3071c2441fa64f584d15311f2c10e61 (diff) |
[PATCH] powerpc/cell spidernet refine locking
The transmit side of the spider ethernet driver currently
places locks around some very large chunks of code. This
results in a fair amount of lock contention in some cases.
This patch makes the locks much more fine-grained, protecting
only the critical sections. One lock is used to protect
three locations: the queue head and tail pointers, and the
queue low-watermark location.
Signed-off-by: Linas Vepstas <linas@austin.ibm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: James K Lewis <jklewis@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/spider_net.c')
-rw-r--r-- | drivers/net/spider_net.c | 95 |
1 files changed, 43 insertions, 52 deletions
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index e429abc1e947..f8d7d0d91a6d 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -646,8 +646,9 @@ static int | |||
646 | spider_net_prepare_tx_descr(struct spider_net_card *card, | 646 | spider_net_prepare_tx_descr(struct spider_net_card *card, |
647 | struct sk_buff *skb) | 647 | struct sk_buff *skb) |
648 | { | 648 | { |
649 | struct spider_net_descr *descr = card->tx_chain.head; | 649 | struct spider_net_descr *descr; |
650 | dma_addr_t buf; | 650 | dma_addr_t buf; |
651 | unsigned long flags; | ||
651 | int length; | 652 | int length; |
652 | 653 | ||
653 | length = skb->len; | 654 | length = skb->len; |
@@ -666,6 +667,10 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
666 | return -ENOMEM; | 667 | return -ENOMEM; |
667 | } | 668 | } |
668 | 669 | ||
670 | spin_lock_irqsave(&card->tx_chain.lock, flags); | ||
671 | descr = card->tx_chain.head; | ||
672 | card->tx_chain.head = descr->next; | ||
673 | |||
669 | descr->buf_addr = buf; | 674 | descr->buf_addr = buf; |
670 | descr->buf_size = length; | 675 | descr->buf_size = length; |
671 | descr->next_descr_addr = 0; | 676 | descr->next_descr_addr = 0; |
@@ -674,6 +679,8 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
674 | 679 | ||
675 | descr->dmac_cmd_status = | 680 | descr->dmac_cmd_status = |
676 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; | 681 | SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; |
682 | spin_unlock_irqrestore(&card->tx_chain.lock, flags); | ||
683 | |||
677 | if (skb->protocol == htons(ETH_P_IP)) | 684 | if (skb->protocol == htons(ETH_P_IP)) |
678 | switch (skb->nh.iph->protocol) { | 685 | switch (skb->nh.iph->protocol) { |
679 | case IPPROTO_TCP: | 686 | case IPPROTO_TCP: |
@@ -691,42 +698,17 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
691 | return 0; | 698 | return 0; |
692 | } | 699 | } |
693 | 700 | ||
694 | /** | ||
695 | * spider_net_release_tx_descr - processes a used tx descriptor | ||
696 | * @card: card structure | ||
697 | * @descr: descriptor to release | ||
698 | * | ||
699 | * releases a used tx descriptor (unmapping, freeing of skb) | ||
700 | */ | ||
701 | static inline void | ||
702 | spider_net_release_tx_descr(struct spider_net_card *card) | ||
703 | { | ||
704 | struct spider_net_descr *descr = card->tx_chain.tail; | ||
705 | struct sk_buff *skb; | ||
706 | unsigned int len; | ||
707 | |||
708 | card->tx_chain.tail = card->tx_chain.tail->next; | ||
709 | descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; | ||
710 | |||
711 | /* unmap the skb */ | ||
712 | skb = descr->skb; | ||
713 | if (!skb) | ||
714 | return; | ||
715 | len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; | ||
716 | pci_unmap_single(card->pdev, descr->buf_addr, len, | ||
717 | PCI_DMA_TODEVICE); | ||
718 | dev_kfree_skb(skb); | ||
719 | } | ||
720 | |||
721 | static void | 701 | static void |
722 | spider_net_set_low_watermark(struct spider_net_card *card) | 702 | spider_net_set_low_watermark(struct spider_net_card *card) |
723 | { | 703 | { |
704 | unsigned long flags; | ||
724 | int status; | 705 | int status; |
725 | int cnt=0; | 706 | int cnt=0; |
726 | int i; | 707 | int i; |
727 | struct spider_net_descr *descr = card->tx_chain.tail; | 708 | struct spider_net_descr *descr = card->tx_chain.tail; |
728 | 709 | ||
729 | /* Measure the length of the queue. */ | 710 | /* Measure the length of the queue. Measurement does not |
711 | * need to be precise -- does not need a lock. */ | ||
730 | while (descr != card->tx_chain.head) { | 712 | while (descr != card->tx_chain.head) { |
731 | status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; | 713 | status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE; |
732 | if (status == SPIDER_NET_DESCR_NOT_IN_USE) | 714 | if (status == SPIDER_NET_DESCR_NOT_IN_USE) |
@@ -746,11 +728,13 @@ spider_net_set_low_watermark(struct spider_net_card *card) | |||
746 | descr = descr->next; | 728 | descr = descr->next; |
747 | 729 | ||
748 | /* Set the new watermark, clear the old watermark */ | 730 | /* Set the new watermark, clear the old watermark */ |
731 | spin_lock_irqsave(&card->tx_chain.lock, flags); | ||
749 | descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG; | 732 | descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG; |
750 | if (card->low_watermark && card->low_watermark != descr) | 733 | if (card->low_watermark && card->low_watermark != descr) |
751 | card->low_watermark->dmac_cmd_status = | 734 | card->low_watermark->dmac_cmd_status = |
752 | card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG; | 735 | card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG; |
753 | card->low_watermark = descr; | 736 | card->low_watermark = descr; |
737 | spin_unlock_irqrestore(&card->tx_chain.lock, flags); | ||
754 | } | 738 | } |
755 | 739 | ||
756 | /** | 740 | /** |
@@ -769,21 +753,31 @@ static int | |||
769 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | 753 | spider_net_release_tx_chain(struct spider_net_card *card, int brutal) |
770 | { | 754 | { |
771 | struct spider_net_descr_chain *chain = &card->tx_chain; | 755 | struct spider_net_descr_chain *chain = &card->tx_chain; |
756 | struct spider_net_descr *descr; | ||
757 | struct sk_buff *skb; | ||
758 | u32 buf_addr; | ||
759 | unsigned long flags; | ||
772 | int status; | 760 | int status; |
773 | 761 | ||
774 | spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR); | 762 | spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR); |
775 | 763 | ||
776 | while (chain->tail != chain->head) { | 764 | while (chain->tail != chain->head) { |
777 | status = spider_net_get_descr_status(chain->tail); | 765 | spin_lock_irqsave(&chain->lock, flags); |
766 | descr = chain->tail; | ||
767 | |||
768 | status = spider_net_get_descr_status(descr); | ||
778 | switch (status) { | 769 | switch (status) { |
779 | case SPIDER_NET_DESCR_COMPLETE: | 770 | case SPIDER_NET_DESCR_COMPLETE: |
780 | card->netdev_stats.tx_packets++; | 771 | card->netdev_stats.tx_packets++; |
781 | card->netdev_stats.tx_bytes += chain->tail->skb->len; | 772 | card->netdev_stats.tx_bytes += descr->skb->len; |
782 | break; | 773 | break; |
783 | 774 | ||
784 | case SPIDER_NET_DESCR_CARDOWNED: | 775 | case SPIDER_NET_DESCR_CARDOWNED: |
785 | if (!brutal) | 776 | if (!brutal) { |
777 | spin_unlock_irqrestore(&chain->lock, flags); | ||
786 | return 1; | 778 | return 1; |
779 | } | ||
780 | |||
787 | /* fallthrough, if we release the descriptors | 781 | /* fallthrough, if we release the descriptors |
788 | * brutally (then we don't care about | 782 | * brutally (then we don't care about |
789 | * SPIDER_NET_DESCR_CARDOWNED) */ | 783 | * SPIDER_NET_DESCR_CARDOWNED) */ |
@@ -800,12 +794,25 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) | |||
800 | 794 | ||
801 | default: | 795 | default: |
802 | card->netdev_stats.tx_dropped++; | 796 | card->netdev_stats.tx_dropped++; |
803 | if (!brutal) | 797 | if (!brutal) { |
798 | spin_unlock_irqrestore(&chain->lock, flags); | ||
804 | return 1; | 799 | return 1; |
800 | } | ||
805 | } | 801 | } |
806 | spider_net_release_tx_descr(card); | ||
807 | } | ||
808 | 802 | ||
803 | chain->tail = descr->next; | ||
804 | descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; | ||
805 | skb = descr->skb; | ||
806 | buf_addr = descr->buf_addr; | ||
807 | spin_unlock_irqrestore(&chain->lock, flags); | ||
808 | |||
809 | /* unmap the skb */ | ||
810 | if (skb) { | ||
811 | int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; | ||
812 | pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE); | ||
813 | dev_kfree_skb(skb); | ||
814 | } | ||
815 | } | ||
809 | return 0; | 816 | return 0; |
810 | } | 817 | } |
811 | 818 | ||
@@ -857,27 +864,19 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
857 | { | 864 | { |
858 | struct spider_net_card *card = netdev_priv(netdev); | 865 | struct spider_net_card *card = netdev_priv(netdev); |
859 | struct spider_net_descr_chain *chain = &card->tx_chain; | 866 | struct spider_net_descr_chain *chain = &card->tx_chain; |
860 | struct spider_net_descr *descr = chain->head; | ||
861 | unsigned long flags; | ||
862 | |||
863 | spin_lock_irqsave(&chain->lock, flags); | ||
864 | 867 | ||
865 | spider_net_release_tx_chain(card, 0); | 868 | spider_net_release_tx_chain(card, 0); |
866 | 869 | ||
867 | if ((chain->head->next == chain->tail->prev) || | 870 | if ((chain->head->next == chain->tail->prev) || |
868 | (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) || | ||
869 | (spider_net_prepare_tx_descr(card, skb) != 0)) { | 871 | (spider_net_prepare_tx_descr(card, skb) != 0)) { |
870 | 872 | ||
871 | card->netdev_stats.tx_dropped++; | 873 | card->netdev_stats.tx_dropped++; |
872 | spin_unlock_irqrestore(&chain->lock, flags); | ||
873 | netif_stop_queue(netdev); | 874 | netif_stop_queue(netdev); |
874 | return NETDEV_TX_BUSY; | 875 | return NETDEV_TX_BUSY; |
875 | } | 876 | } |
876 | 877 | ||
877 | spider_net_set_low_watermark(card); | 878 | spider_net_set_low_watermark(card); |
878 | spider_net_kick_tx_dma(card); | 879 | spider_net_kick_tx_dma(card); |
879 | card->tx_chain.head = card->tx_chain.head->next; | ||
880 | spin_unlock_irqrestore(&chain->lock, flags); | ||
881 | return NETDEV_TX_OK; | 880 | return NETDEV_TX_OK; |
882 | } | 881 | } |
883 | 882 | ||
@@ -893,16 +892,11 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
893 | static void | 892 | static void |
894 | spider_net_cleanup_tx_ring(struct spider_net_card *card) | 893 | spider_net_cleanup_tx_ring(struct spider_net_card *card) |
895 | { | 894 | { |
896 | unsigned long flags; | ||
897 | |||
898 | spin_lock_irqsave(&card->tx_chain.lock, flags); | ||
899 | |||
900 | if ((spider_net_release_tx_chain(card, 0) != 0) && | 895 | if ((spider_net_release_tx_chain(card, 0) != 0) && |
901 | (card->netdev->flags & IFF_UP)) { | 896 | (card->netdev->flags & IFF_UP)) { |
902 | spider_net_kick_tx_dma(card); | 897 | spider_net_kick_tx_dma(card); |
903 | netif_wake_queue(card->netdev); | 898 | netif_wake_queue(card->netdev); |
904 | } | 899 | } |
905 | spin_unlock_irqrestore(&card->tx_chain.lock, flags); | ||
906 | } | 900 | } |
907 | 901 | ||
908 | /** | 902 | /** |
@@ -1930,10 +1924,7 @@ spider_net_stop(struct net_device *netdev) | |||
1930 | spider_net_disable_rxdmac(card); | 1924 | spider_net_disable_rxdmac(card); |
1931 | 1925 | ||
1932 | /* release chains */ | 1926 | /* release chains */ |
1933 | if (spin_trylock(&card->tx_chain.lock)) { | 1927 | spider_net_release_tx_chain(card, 1); |
1934 | spider_net_release_tx_chain(card, 1); | ||
1935 | spin_unlock(&card->tx_chain.lock); | ||
1936 | } | ||
1937 | 1928 | ||
1938 | spider_net_free_chain(card, &card->tx_chain); | 1929 | spider_net_free_chain(card, &card->tx_chain); |
1939 | spider_net_free_chain(card, &card->rx_chain); | 1930 | spider_net_free_chain(card, &card->rx_chain); |