 drivers/net/ethernet/realtek/r8169.c | 157 ++++++++++++++++++++++++++++++----
 1 file changed, 145 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e5a0bdbfbb2b..51c78ce27b37 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -27,6 +27,8 @@
 #include <linux/firmware.h>
 #include <linux/pci-aspm.h>
 #include <linux/prefetch.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -628,11 +630,16 @@ enum rtl_tx_desc_bit_0 {
 enum rtl_tx_desc_bit_1 {
 	/* First doubleword. */
 	TD1_GTSENV4	= (1 << 26),		/* Giant Send for IPv4 */
+	TD1_GTSENV6	= (1 << 25),		/* Giant Send for IPv6 */
 #define GTTCPHO_SHIFT	18
+#define GTTCPHO_MAX	0x7fU
 
 	/* Second doubleword. */
+#define TCPHO_SHIFT	18
+#define TCPHO_MAX	0x3ffU
 #define TD1_MSS_SHIFT	18	/* MSS position (11 bits) */
-	TD1_IP_CS	= (1 << 29),		/* Calculate IP checksum */
+	TD1_IPv6_CS	= (1 << 28),		/* Calculate IPv6 checksum */
+	TD1_IPv4_CS	= (1 << 29),		/* Calculate IPv4 checksum */
 	TD1_TCP_CS	= (1 << 30),		/* Calculate TCP/IP checksum */
 	TD1_UDP_CS	= (1 << 31),		/* Calculate UDP/IP checksum */
 };
@@ -5920,6 +5927,82 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
 	return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
 }
 
+static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+				      struct net_device *dev);
+/* r8169_csum_workaround()
+ * The hw limits the value of the transport offset. When the offset is out of
+ * the range, calculate the checksum by sw.
+ */
+static void r8169_csum_workaround(struct rtl8169_private *tp,
+				  struct sk_buff *skb)
+{
+	if (skb_shinfo(skb)->gso_size) {
+		netdev_features_t features = tp->dev->features;
+		struct sk_buff *segs, *nskb;
+
+		features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+		segs = skb_gso_segment(skb, features);
+		if (IS_ERR(segs) || !segs)
+			goto drop;
+
+		do {
+			nskb = segs;
+			segs = segs->next;
+			nskb->next = NULL;
+			rtl8169_start_xmit(nskb, tp->dev);
+		} while (segs);
+
+		dev_kfree_skb(skb);
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (skb_checksum_help(skb) < 0)
+			goto drop;
+
+		rtl8169_start_xmit(skb, tp->dev);
+	} else {
+		struct net_device_stats *stats;
+
+drop:
+		stats = &tp->dev->stats;
+		stats->tx_dropped++;
+		dev_kfree_skb(skb);
+	}
+}
+
+/* msdn_giant_send_check()
+ * According to Microsoft's documentation, the TCP Pseudo Header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+	const struct ipv6hdr *ipv6h;
+	struct tcphdr *th;
+	int ret;
+
+	ret = skb_cow_head(skb, 0);
+	if (ret)
+		return ret;
+
+	ipv6h = ipv6_hdr(skb);
+	th = tcp_hdr(skb);
+
+	th->check = 0;
+	th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+	return ret;
+}
+
+static inline __be16 get_protocol(struct sk_buff *skb)
+{
+	__be16 protocol;
+
+	if (skb->protocol == htons(ETH_P_8021Q))
+		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+	else
+		protocol = skb->protocol;
+
+	return protocol;
+}
+
 static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
 				struct sk_buff *skb, u32 *opts)
 {
@@ -5949,21 +6032,69 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
 	u32 mss = skb_shinfo(skb)->gso_size;
 
 	if (mss) {
-		opts[0] |= TD1_GTSENV4;
+		if (transport_offset > GTTCPHO_MAX) {
+			netif_warn(tp, tx_err, tp->dev,
+				   "Invalid transport offset 0x%x for TSO\n",
+				   transport_offset);
+			return false;
+		}
+
+		switch (get_protocol(skb)) {
+		case htons(ETH_P_IP):
+			opts[0] |= TD1_GTSENV4;
+			break;
+
+		case htons(ETH_P_IPV6):
+			if (msdn_giant_send_check(skb))
+				return false;
+
+			opts[0] |= TD1_GTSENV6;
+			break;
+
+		default:
+			WARN_ON_ONCE(1);
+			break;
+		}
+
 		opts[0] |= transport_offset << GTTCPHO_SHIFT;
 		opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		const struct iphdr *ip = ip_hdr(skb);
+		u8 ip_protocol;
 
 		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
 			return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
 
-		if (ip->protocol == IPPROTO_TCP)
-			opts[1] |= TD1_IP_CS | TD1_TCP_CS;
-		else if (ip->protocol == IPPROTO_UDP)
-			opts[1] |= TD1_IP_CS | TD1_UDP_CS;
+		if (transport_offset > TCPHO_MAX) {
+			netif_warn(tp, tx_err, tp->dev,
+				   "Invalid transport offset 0x%x\n",
+				   transport_offset);
+			return false;
+		}
+
+		switch (get_protocol(skb)) {
+		case htons(ETH_P_IP):
+			opts[1] |= TD1_IPv4_CS;
+			ip_protocol = ip_hdr(skb)->protocol;
+			break;
+
+		case htons(ETH_P_IPV6):
+			opts[1] |= TD1_IPv6_CS;
+			ip_protocol = ipv6_hdr(skb)->nexthdr;
+			break;
+
+		default:
+			ip_protocol = IPPROTO_RAW;
+			break;
+		}
+
+		if (ip_protocol == IPPROTO_TCP)
+			opts[1] |= TD1_TCP_CS;
+		else if (ip_protocol == IPPROTO_UDP)
+			opts[1] |= TD1_UDP_CS;
 		else
 			WARN_ON_ONCE(1);
+
+		opts[1] |= transport_offset << TCPHO_SHIFT;
 	} else {
 		if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
 			return rtl_skb_pad(skb);
@@ -5996,8 +6127,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
 	opts[0] = DescOwn;
 
-	if (!tp->tso_csum(tp, skb, opts))
-		goto err_update_stats;
+	if (!tp->tso_csum(tp, skb, opts)) {
+		r8169_csum_workaround(tp, skb);
+		return NETDEV_TX_OK;
+	}
 
 	len = skb_headlen(skb);
 	mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
@@ -6062,7 +6195,6 @@ err_dma_1:
 	rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
 err_dma_0:
 	dev_kfree_skb_any(skb);
-err_update_stats:
 	dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 
@@ -7149,9 +7281,10 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	if (tp->txd_version == RTL_TD_0)
 		tp->tso_csum = rtl8169_tso_csum_v1;
-	else if (tp->txd_version == RTL_TD_1)
+	else if (tp->txd_version == RTL_TD_1) {
 		tp->tso_csum = rtl8169_tso_csum_v2;
-	else
+		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+	} else
 		WARN_ON_ONCE(1);
 
 	dev->hw_features |= NETIF_F_RXALL;
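
For readers unfamiliar with the TD_1 descriptor layout, the following stand-alone sketch (plain user-space C, not driver code) packs the first and second TX-descriptor doublewords the way rtl8169_tso_csum_v2() now does for an IPv6 TSO frame. The TD1_* bits and shifts are copied from the enum rtl_tx_desc_bit_1 hunk above; the value of TD_MSS_MAX and the example offset/MSS inputs are assumptions added purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* TX descriptor bits as defined in the enum rtl_tx_desc_bit_1 hunk above. */
#define TD1_GTSENV4	(1u << 26)	/* Giant Send for IPv4 */
#define TD1_GTSENV6	(1u << 25)	/* Giant Send for IPv6 */
#define GTTCPHO_SHIFT	18
#define GTTCPHO_MAX	0x7fU
#define TD1_MSS_SHIFT	18		/* MSS position (11 bits) */
#define TD_MSS_MAX	0x7ffU		/* assumption: 11-bit field; value not shown in this patch */

int main(void)
{
	/* Made-up inputs: Ethernet (14) + IPv6 (40) headers put the TCP header at offset 54. */
	uint32_t transport_offset = 54;
	uint32_t mss = 1440;
	uint32_t opts[2] = { 0, 0 };

	if (transport_offset > GTTCPHO_MAX)
		return 1;	/* the driver would fall back to r8169_csum_workaround() here */

	opts[0] |= TD1_GTSENV6;				/* IPv6 giant send */
	opts[0] |= transport_offset << GTTCPHO_SHIFT;	/* where the TCP header starts */
	opts[1] |= (mss > TD_MSS_MAX ? TD_MSS_MAX : mss) << TD1_MSS_SHIFT;

	printf("opts[0]=0x%08x opts[1]=0x%08x\n", opts[0], opts[1]);
	return 0;
}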