Diffstat (limited to 'drivers/net/ethernet/realtek/8139cp.c')
-rw-r--r--   drivers/net/ethernet/realtek/8139cp.c   111
1 file changed, 55 insertions(+), 56 deletions(-)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d79e33b3c191..686334f4588d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
 	NWayAdvert	= 0x66, /* MII ADVERTISE */
 	NWayLPAR	= 0x68, /* MII LPA */
 	NWayExpansion	= 0x6A, /* MII Expansion */
+	TxDmaOkLowDesc	= 0x82, /* Low 16 bit address of a Tx descriptor. */
 	Config5		= 0xD8, /* Config5 */
 	TxPoll		= 0xD9, /* Tell chip to check Tx descriptors for work */
 	RxMaxSize	= 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
 	unsigned		tx_tail;
 	struct cp_desc		*tx_ring;
 	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
+	u32			tx_opts[CP_TX_RING_SIZE];
 
 	unsigned		rx_buf_sz;
 	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp)
 		BUG_ON(!skb);
 
 		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
-				 le32_to_cpu(txd->opts1) & 0xffff,
+				 cp->tx_opts[tx_tail] & 0xffff,
 				 PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
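Why the unmap length now comes from cp->tx_opts rather than from the descriptor: once DescOwn has been handed to the chip, the descriptor belongs to the hardware, which may rewrite opts1 before completion, so the driver keeps its own copy of the value it wrote. Below is a minimal userspace model of that shadow-copy pattern; the names, the 4-slot ring, and the device behavior are invented for illustration, not taken from the driver.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 4
    #define DESC_OWN  (1u << 31)
    #define LEN_MASK  0xffffu

    struct desc {                            /* shared with the device */
            uint32_t opts1;                  /* DescOwn | flags | length */
    };

    static struct desc ring[RING_SIZE];
    static uint32_t shadow_opts[RING_SIZE];  /* driver-private copy */

    static void queue_tx(int slot, uint32_t len)
    {
            uint32_t opts1 = DESC_OWN | (len & LEN_MASK);

            shadow_opts[slot] = opts1;       /* remember before handover */
            ring[slot].opts1 = opts1;        /* device owns the slot now */
    }

    static void device_completes(int slot)
    {
            ring[slot].opts1 = 0;            /* hardware may clobber the word */
    }

    int main(void)
    {
            queue_tx(0, 1500);
            device_completes(0);
            /* An unmap sized by ring[0].opts1 & LEN_MASK would use 0 here;
             * the shadow copy still holds the real mapped length. */
            printf("descriptor len %u, shadow len %u\n",
                   ring[0].opts1 & LEN_MASK, shadow_opts[0] & LEN_MASK);
            return 0;
    }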
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
-	u32 eor, flags;
+	u32 eor, opts1;
 	unsigned long intr_flags;
 	__le32 opts2;
 	int mss = 0;
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 	mss = skb_shinfo(skb)->gso_size;
 
 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+	opts1 = DescOwn;
+	if (mss)
+		opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		const struct iphdr *ip = ip_hdr(skb);
+		if (ip->protocol == IPPROTO_TCP)
+			opts1 |= IPCS | TCPCS;
+		else if (ip->protocol == IPPROTO_UDP)
+			opts1 |= IPCS | UDPCS;
+		else {
+			WARN_ONCE(1,
+				  "Net bug: asked to checksum invalid Legacy IP packet\n");
+			goto out_dma_error;
+		}
+	}
 
 	if (skb_shinfo(skb)->nr_frags == 0) {
 		struct cp_desc *txd = &cp->tx_ring[entry];
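The added block computes the per-packet part of opts1 exactly once: DescOwn always, then either LargeSend with the MSS field for TSO, or IPCS plus TCPCS/UDPCS for hardware checksumming, and a WARN_ONCE plus drop (rather than the old BUG()/bare WARN_ON) for anything else. Every descriptor built later only ORs in its own per-fragment bits. A compilable sketch of that split follows; the bit values are made up and merely stand in for the driver's enum.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit values only; the real ones are in 8139cp.c. */
    enum {
            OWN    = 1u << 31, FIRST  = 1u << 29, LAST   = 1u << 28,
            LSO    = 1u << 27, IPCSUM = 1u << 18, TCPSUM = 1u << 17,
            UDPSUM = 1u << 16,
    };

    /* Per-packet flags, computed once per skb. */
    static uint32_t pkt_flags(int mss, int tcp, int udp)
    {
            uint32_t opts1 = OWN;

            if (mss)
                    opts1 |= LSO;            /* + MSS field in the driver */
            else if (tcp)
                    opts1 |= IPCSUM | TCPSUM;
            else if (udp)
                    opts1 |= IPCSUM | UDPSUM;
            return opts1;
    }

    /* Per-descriptor flags: only position and length are added here. */
    static uint32_t desc_flags(uint32_t opts1, uint32_t len, int first, int last)
    {
            return opts1 | (first ? FIRST : 0) | (last ? LAST : 0) | len;
    }

    int main(void)
    {
            uint32_t base = pkt_flags(0, 1, 0);       /* TCP checksum case */

            printf("%08x\n", desc_flags(base, 1500, 1, 1));
            return 0;
    }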
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
 
-		flags = eor | len | DescOwn | FirstFrag | LastFrag;
-
-		if (mss)
-			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
-		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			const struct iphdr *ip = ip_hdr(skb);
-			if (ip->protocol == IPPROTO_TCP)
-				flags |= IPCS | TCPCS;
-			else if (ip->protocol == IPPROTO_UDP)
-				flags |= IPCS | UDPCS;
-			else
-				WARN_ON(1);	/* we need a WARN() */
-		}
+		opts1 |= eor | len | FirstFrag | LastFrag;
 
-		txd->opts1 = cpu_to_le32(flags);
+		txd->opts1 = cpu_to_le32(opts1);
 		wmb();
 
 		cp->tx_skb[entry] = skb;
-		entry = NEXT_TX(entry);
+		cp->tx_opts[entry] = opts1;
+		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+			  entry, skb->len);
 	} else {
 		struct cp_desc *txd;
-		u32 first_len, first_eor;
+		u32 first_len, first_eor, ctrl;
 		dma_addr_t first_mapping;
 		int frag, first_entry = entry;
-		const struct iphdr *ip = ip_hdr(skb);
 
 		/* We must give this initial chunk to the device last.
 		 * Otherwise we could race with the device.
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			goto out_dma_error;
 
 		cp->tx_skb[entry] = skb;
-		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
 			const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
 			u32 len;
-			u32 ctrl;
 			dma_addr_t mapping;
 
+			entry = NEXT_TX(entry);
+
 			len = skb_frag_size(this_frag);
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
-			ctrl = eor | len | DescOwn;
-
-			if (mss)
-				ctrl |= LargeSend |
-					((mss & MSSMask) << MSSShift);
-			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-				if (ip->protocol == IPPROTO_TCP)
-					ctrl |= IPCS | TCPCS;
-				else if (ip->protocol == IPPROTO_UDP)
-					ctrl |= IPCS | UDPCS;
-				else
-					BUG();
-			}
+			ctrl = opts1 | eor | len;
 
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				ctrl |= LastFrag;
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			txd->opts1 = cpu_to_le32(ctrl);
 			wmb();
 
+			cp->tx_opts[entry] = ctrl;
 			cp->tx_skb[entry] = skb;
-			entry = NEXT_TX(entry);
 		}
 
 		txd = &cp->tx_ring[first_entry];
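Note the new index discipline in the fragment loop above: entry is advanced at the top of each iteration, so every store (txd, cp->tx_opts[entry], cp->tx_skb[entry]) names the slot actually being filled, and after the loop entry points at the last slot used rather than one past it; the next hunk then sets the head to NEXT_TX(entry). A small standalone model of that bookkeeping (ring size and starting values invented):

    #include <stdio.h>

    #define RING 8                           /* must be a power of two */
    #define NEXT(x) (((x) + 1) & (RING - 1))

    int main(void)
    {
            int entry = 6;                   /* first free slot; wraps */
            int first_entry = entry;
            int nfrags = 3;

            for (int frag = 0; frag < nfrags; frag++) {
                    entry = NEXT(entry);     /* advance first, then fill */
                    printf("frag %d -> slot %d\n", frag, entry);
            }
            /* entry is the LAST used slot, so the next free slot is: */
            printf("first %d, last %d, new head %d\n",
                   first_entry, entry, NEXT(entry));
            return 0;
    }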
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		txd->addr = cpu_to_le64(first_mapping);
 		wmb();
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (ip->protocol == IPPROTO_TCP)
-				txd->opts1 = cpu_to_le32(first_eor | first_len |
-							 FirstFrag | DescOwn |
-							 IPCS | TCPCS);
-			else if (ip->protocol == IPPROTO_UDP)
-				txd->opts1 = cpu_to_le32(first_eor | first_len |
-							 FirstFrag | DescOwn |
-							 IPCS | UDPCS);
-			else
-				BUG();
-		} else
-			txd->opts1 = cpu_to_le32(first_eor | first_len |
-						 FirstFrag | DescOwn);
+		ctrl = opts1 | first_eor | first_len | FirstFrag;
+		txd->opts1 = cpu_to_le32(ctrl);
 		wmb();
+
+		cp->tx_opts[first_entry] = ctrl;
+		netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+			  first_entry, entry, skb->len);
 	}
-	cp->tx_head = entry;
+	cp->tx_head = NEXT_TX(entry);
 
 	netdev_sent_queue(dev, skb->len);
-	netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
-		  entry, skb->len);
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
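The ordering in both transmit paths is what makes the handover safe: addr (and opts2) are written first, wmb() makes them visible, and only then is opts1 stored with DescOwn set, so the chip can never observe a descriptor whose ownership bit is set but whose body is stale; for fragmented packets the first descriptor is published last for the same reason. A userspace analogue using C11 release/acquire in place of wmb() (struct layout and names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DESC_OWN (1u << 31)

    struct desc {
            uint64_t addr;                   /* payload: filled first */
            _Atomic uint32_t opts1;          /* ownership: stored last */
    };

    /* Driver side: fill the body, then publish ownership with release
     * semantics, which orders the addr store before the opts1 store
     * (the role wmb() plays in the kernel code). */
    static void publish(struct desc *d, uint64_t addr, uint32_t flags)
    {
            d->addr = addr;
            atomic_store_explicit(&d->opts1, flags | DESC_OWN,
                                  memory_order_release);
    }

    /* Device/completion side: acquire-load ownership before trusting addr. */
    static int device_owns(const struct desc *d)
    {
            return atomic_load_explicit(&d->opts1,
                                        memory_order_acquire) & DESC_OWN;
    }

    int main(void)
    {
            struct desc d = { 0 };

            publish(&d, 0xdeadbeefULL, 1500);
            printf("owned by device: %d\n", device_owns(&d) != 0);
            return 0;
    }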
@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp)
 {
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 
 	cp_init_rings_index(cp);
 
@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
 			desc = cp->rx_ring + i;
 			dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(cp->rx_skb[i]);
+			dev_kfree_skb_any(cp->rx_skb[i]);
 		}
 	}
 
@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp)
 					 le32_to_cpu(desc->opts1) & 0xffff,
 					 PCI_DMA_TODEVICE);
 			if (le32_to_cpu(desc->opts1) & LastFrag)
-				dev_kfree_skb(skb);
+				dev_kfree_skb_any(skb);
 			cp->dev->stats.tx_dropped++;
 		}
 	}
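Both frees switch to dev_kfree_skb_any() because cp_clean_rings() is also reached from cp_tx_timeout(), which runs under spin_lock_irqsave() with interrupts off, where plain dev_kfree_skb() must not be used; the _any variant checks the calling context and defers the free when necessary. A toy model of that idea (the context flag and the deferral counter are stand-ins for the kernel's machinery, invented for illustration):

    #include <stdio.h>

    static int hard_context;        /* stands in for irqs_disabled()/in_irq() */
    static int deferred;            /* stands in for the softirq free queue */

    static void toy_kfree_any(void *p)
    {
            if (hard_context) {
                    deferred++;     /* real code queues the skb for later */
                    printf("deferred free of %p\n", p);
            } else {
                    printf("freed %p immediately\n", p);
            }
    }

    int main(void)
    {
            int buf;

            toy_kfree_any(&buf);    /* normal teardown: immediate */
            hard_context = 1;
            toy_kfree_any(&buf);    /* TX-timeout path: deferred */
            return 0;
    }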
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp)
 
 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+	memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
 
 	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
 	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev)
 {
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
-	int rc;
+	int rc, i;
 
 	netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
 		    cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev)
 
 	spin_lock_irqsave(&cp->lock, flags);
 
+	netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
+		  cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
+	for (i = 0; i < CP_TX_RING_SIZE; i++) {
+		netif_dbg(cp, tx_err, cp->dev,
+			  "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
+			  i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
+			  cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
+			  le64_to_cpu(cp->tx_ring[i].addr),
+			  cp->tx_skb[i]);
+	}
+
 	cp_stop_hw(cp);
 	cp_clean_rings(cp);
 	rc = cp_init_rings(cp);
 	cp_start_hw(cp);
-	cp_enable_irq(cp);
+	__cp_set_rx_mode(dev);
+	cpw16_f(IntrMask, cp_norx_intr_mask);
 
 	netif_wake_queue(dev);
+	napi_schedule_irqoff(&cp->napi);
 
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
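Two things change in the timeout path: before resetting, the handler now dumps every TX slot, printing the hardware's opts1 next to the driver's shadow tx_opts so a slot the chip never completed stands out, and after the reset it reprograms the RX filter and leaves RX interrupts masked (cp_norx_intr_mask), scheduling NAPI so the poll loop brings RX back up in the usual way instead of re-enabling the interrupt directly. A standalone sketch of reading such a dump; all values are fabricated and DESC_OWN merely stands in for the driver's DescOwn bit:

    #include <stdint.h>
    #include <stdio.h>

    #define RING 4
    #define DESC_OWN (1u << 31)

    int main(void)
    {
            /* Fabricated snapshot: slot 0 completed (chip cleared DescOwn),
             * slot 1 was handed to the chip and never completed. */
            uint32_t hw[RING]     = { 60, DESC_OWN | 1500, 0, 0 };
            uint32_t shadow[RING] = { DESC_OWN | 60, DESC_OWN | 1500, 0, 0 };

            for (int i = 0; i < RING; i++)
                    printf("TX slot %d: %08x (%08x)%s\n", i, hw[i], shadow[i],
                           (hw[i] & DESC_OWN) ? "  <- still owned by chip" : "");
            return 0;
    }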