author     Anton Blanchard <anton@samba.org>      2011-10-14 01:31:05 -0400
committer  David S. Miller <davem@davemloft.net>  2011-10-17 19:00:55 -0400
commit     d695c335f9165cb73f9389479cce755e8207b5f4
tree       23e72cf67fbe555e23d00001983a857a2e9666dc
parent     945db2d4f4f6caf75b988f78e40aa75145ee46a4
ehea: Simplify ehea_xmit2 and ehea_xmit3
Based on a patch from Michael Ellerman, clean up a significant
portion of the transmit path. There was a lot of duplication here.
Even worse, we were always checksumming tx packets and ignoring the
skb->ip_summed field.
Also remove NETIF_F_FRAGLIST from dev->features; I'm not sure why
it was enabled.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
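
The core change in the diff below is a single shared decision: both transmit
variants now go through one helper, xmit_common(), which requests hardware
IP/TCP/UDP checksum insertion only when the stack handed the driver a
CHECKSUM_PARTIAL skb, instead of unconditionally setting the checksum bits.
The following is a minimal user-space sketch of that control flow, not driver
code: mock_skb, mock_swqe and the FLAG_* constants are illustrative stand-ins
for struct sk_buff, struct ehea_swqe and the EHEA_SWQE_* bits seen in the diff.

/* Model of the xmit_common() decision logic from this patch.
 * All types and flag names here are mock-ups for illustration only.
 */
#include <stdio.h>

enum { CSUM_NONE, CSUM_PARTIAL };        /* stands in for skb->ip_summed      */
enum { PROTO_TCP = 6, PROTO_UDP = 17 };  /* stands in for IPPROTO_TCP/UDP     */

#define FLAG_CRC      0x01               /* ~ EHEA_SWQE_CRC                   */
#define FLAG_IMM_DATA 0x02               /* ~ EHEA_SWQE_IMM_DATA_PRESENT      */
#define FLAG_IP_CSUM  0x04               /* ~ EHEA_SWQE_IP_CHECKSUM           */
#define FLAG_L4_CSUM  0x08               /* ~ EHEA_SWQE_TCP_CHECKSUM          */

struct mock_skb  { int is_ipv4; int ip_summed; int l4_proto; };
struct mock_swqe { unsigned int tx_control; };

/* Shared by both transmit paths, mirroring xmit_common() in the patch. */
static void xmit_common_model(const struct mock_skb *skb, struct mock_swqe *swqe)
{
	swqe->tx_control |= FLAG_IMM_DATA | FLAG_CRC;

	if (!skb->is_ipv4)
		return;

	/* Only ask the hardware to checksum when the stack requested it. */
	if (skb->ip_summed == CSUM_PARTIAL)
		swqe->tx_control |= FLAG_IP_CSUM;

	switch (skb->l4_proto) {
	case PROTO_UDP:
	case PROTO_TCP:
		if (skb->ip_summed == CSUM_PARTIAL)
			swqe->tx_control |= FLAG_L4_CSUM;
		break;
	}
}

int main(void)
{
	struct mock_skb offloaded  = { 1, CSUM_PARTIAL, PROTO_TCP };
	struct mock_skb prechecked = { 1, CSUM_NONE,    PROTO_TCP };
	struct mock_swqe a = { 0 }, b = { 0 };

	xmit_common_model(&offloaded, &a);
	xmit_common_model(&prechecked, &b);
	printf("offloaded tx_control=0x%x, pre-checksummed tx_control=0x%x\n",
	       a.tx_control, b.tx_control);
	return 0;
}

With the checksum decision factored out like this, ehea_xmit2() and
ehea_xmit3() differ only in how the payload is attached (descriptors versus
immediate data), which is exactly the split visible in the diff below.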
 drivers/net/ethernet/ibm/ehea/ehea_main.c | 137
 1 file changed, 36 insertions(+), 101 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 13218092769c..77aafba8272c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1676,37 +1676,6 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
 	return ret;
 }
 
-/*
- * The write_* functions store information in swqe which is used by
- * the hardware to calculate the ip/tcp/udp checksum
- */
-
-static inline void write_ip_start_end(struct ehea_swqe *swqe,
-				      const struct sk_buff *skb)
-{
-	swqe->ip_start = skb_network_offset(skb);
-	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
-}
-
-static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
-					const struct sk_buff *skb)
-{
-	swqe->tcp_offset =
-		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));
-
-	swqe->tcp_end = (u16)skb->len - 1;
-}
-
-static inline void write_udp_offset_end(struct ehea_swqe *swqe,
-					const struct sk_buff *skb)
-{
-	swqe->tcp_offset =
-		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));
-
-	swqe->tcp_end = (u16)skb->len - 1;
-}
-
-
 static void write_swqe2_TSO(struct sk_buff *skb,
 			    struct ehea_swqe *swqe, u32 lkey)
 {
@@ -2105,41 +2074,46 @@ static int ehea_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }
 
-static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
-		       struct ehea_swqe *swqe, u32 lkey)
-{
-	if (skb->protocol == htons(ETH_P_IP)) {
-		const struct iphdr *iph = ip_hdr(skb);
-
-		/* IPv4 */
-		swqe->tx_control |= EHEA_SWQE_CRC
-				 | EHEA_SWQE_IP_CHECKSUM
-				 | EHEA_SWQE_TCP_CHECKSUM
-				 | EHEA_SWQE_IMM_DATA_PRESENT
-				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
-
-		write_ip_start_end(swqe, skb);
-
-		if (iph->protocol == IPPROTO_UDP) {
-			if ((iph->frag_off & IP_MF) ||
-			    (iph->frag_off & IP_OFFSET))
-				/* IP fragment, so don't change cs */
-				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
-			else
-				write_udp_offset_end(swqe, skb);
-		} else if (iph->protocol == IPPROTO_TCP) {
-			write_tcp_offset_end(swqe, skb);
-		}
-
-		/* icmp (big data) and ip segmentation packets (all other ip
-		   packets) do not require any special handling */
-
-	} else {
-		/* Other Ethernet Protocol */
-		swqe->tx_control |= EHEA_SWQE_CRC
-				 | EHEA_SWQE_IMM_DATA_PRESENT
-				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
-	}
+static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
+{
+	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
+
+	swqe->ip_start = skb_network_offset(skb);
+	swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
+
+	switch (ip_hdr(skb)->protocol) {
+	case IPPROTO_UDP:
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+		swqe->tcp_offset = swqe->ip_end + 1 +
+				   offsetof(struct udphdr, check);
+		swqe->tcp_end = skb->len - 1;
+		break;
+
+	case IPPROTO_TCP:
+		if (skb->ip_summed == CHECKSUM_PARTIAL)
+			swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
+
+		swqe->tcp_offset = swqe->ip_end + 1 +
+				   offsetof(struct tcphdr, check);
+		swqe->tcp_end = skb->len - 1;
+		break;
+	}
+}
+
+static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
+		       struct ehea_swqe *swqe, u32 lkey)
+{
+	swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
+
+	xmit_common(skb, swqe);
 
 	write_swqe2_data(skb, dev, swqe, lkey);
 }
@@ -2152,51 +2126,11 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 	skb_frag_t *frag;
 	int i;
 
-	if (skb->protocol == htons(ETH_P_IP)) {
-		const struct iphdr *iph = ip_hdr(skb);
+	xmit_common(skb, swqe);
 
-		/* IPv4 */
-		write_ip_start_end(swqe, skb);
-
-		if (iph->protocol == IPPROTO_TCP) {
-			swqe->tx_control |= EHEA_SWQE_CRC
-					 | EHEA_SWQE_IP_CHECKSUM
-					 | EHEA_SWQE_TCP_CHECKSUM
-					 | EHEA_SWQE_IMM_DATA_PRESENT;
-
-			write_tcp_offset_end(swqe, skb);
-
-		} else if (iph->protocol == IPPROTO_UDP) {
-			if ((iph->frag_off & IP_MF) ||
-			    (iph->frag_off & IP_OFFSET))
-				/* IP fragment, so don't change cs */
-				swqe->tx_control |= EHEA_SWQE_CRC
-						 | EHEA_SWQE_IMM_DATA_PRESENT;
-			else {
-				swqe->tx_control |= EHEA_SWQE_CRC
-						 | EHEA_SWQE_IP_CHECKSUM
-						 | EHEA_SWQE_TCP_CHECKSUM
-						 | EHEA_SWQE_IMM_DATA_PRESENT;
-
-				write_udp_offset_end(swqe, skb);
-			}
-		} else {
-			/* icmp (big data) and
-			   ip segmentation packets (all other ip packets) */
-			swqe->tx_control |= EHEA_SWQE_CRC
-					 | EHEA_SWQE_IP_CHECKSUM
-					 | EHEA_SWQE_IMM_DATA_PRESENT;
-		}
-	} else {
-		/* Other Ethernet Protocol */
-		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
-	}
-	/* copy (immediate) data */
 	if (nfrags == 0) {
-		/* data is in a single piece */
 		skb_copy_from_linear_data(skb, imm_data, skb->len);
 	} else {
-		/* first copy data from the skb->data buffer ... */
 		skb_copy_from_linear_data(skb, imm_data,
 					  skb_headlen(skb));
 		imm_data += skb_headlen(skb);
@@ -2208,6 +2142,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 			imm_data += frag->size;
 		}
 	}
+
 	swqe->immediate_data_length = skb->len;
 	dev_kfree_skb(skb);
 }
@@ -3184,7 +3119,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 	dev->netdev_ops = &ehea_netdev_ops;
 	ehea_set_ethtool_ops(dev);
 
-	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+	dev->hw_features = NETIF_F_SG | NETIF_F_TSO
 		| NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
 		| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX