author     Dhananjay Phadke <dhananjay@netxen.com>    2009-01-14 23:48:11 -0500
committer  David S. Miller <davem@davemloft.net>      2009-01-14 23:48:11 -0500
commit     391587c3447d99b842a647f8e701895c9eea050b (patch)
tree       5acf2f1282d068195261216953a1b71f20fb1c9a /drivers/net/netxen/netxen_nic_main.c
parent     2edbb454428729f450f7a0aabbf95ac62b46b78a (diff)
netxen: fix ipv6 offload and tx cleanup

o fix the ip/tcp header offsets in tx descriptors for ipv6.
o clean up the xmit function: move the tso checks into a separate
  function, which avoids unnecessary endian conversions back and forth.
o optimize the macros that initialize tx descriptors.

Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/netxen/netxen_nic_main.c')
 -rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 101
 1 file changed, 45 insertions, 56 deletions
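The ipv6 header-offset fix in the first bullet of the commit message comes down to how the LSO header length is computed. ip_hdrlen() reads the IPv4 ihl field, so sizeof(struct ethhdr) + ip_hdrlen(skb) + tcp_hdrlen(skb) is only meaningful for untagged IPv4 frames, whereas skb_transport_offset(skb) + tcp_hdrlen(skb) measures from the start of the frame to the end of the TCP header for IPv4 and IPv6 alike. A minimal sketch of the two computations (illustrative helper names only, not code from this patch):

/* Illustrative sketch -- helper names are made up, not from this patch. */
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/tcp.h>

/* Old math: ip_hdrlen() dereferences the IPv4 ihl field, so this is
 * wrong for IPv6 (and ignores any VLAN tag in front of the IP header).
 */
static unsigned int lso_hdr_len_ipv4_only(const struct sk_buff *skb)
{
	return sizeof(struct ethhdr) + ip_hdrlen(skb) + tcp_hdrlen(skb);
}

/* New math: skb_transport_offset() already spans MAC + any VLAN tag +
 * the L3 header, whichever protocol that is, so adding the TCP header
 * length gives the full header span that LSO needs.
 */
static unsigned int lso_hdr_len(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}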
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ba01524b5531..cb3912381802 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -39,6 +39,7 @@
 #include "netxen_nic_phan_reg.h"
 
 #include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 
 MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
@@ -1137,29 +1138,46 @@ static int netxen_nic_close(struct net_device *netdev)
 	return 0;
 }
 
-void netxen_tso_check(struct netxen_adapter *adapter,
+static bool netxen_tso_check(struct net_device *netdev,
 		struct cmd_desc_type0 *desc, struct sk_buff *skb)
 {
-	if (desc->mss) {
-		desc->total_hdr_length = (sizeof(struct ethhdr) +
-				ip_hdrlen(skb) + tcp_hdrlen(skb));
+	bool tso = false;
+	u8 opcode = TX_ETHER_PKT;
 
-		if ((NX_IS_REVISION_P3(adapter->ahw.revision_id)) &&
-				(skb->protocol == htons(ETH_P_IPV6)))
-			netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO6);
-		else
-			netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
+	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+			skb_shinfo(skb)->gso_size > 0) {
+
+		desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		desc->total_hdr_length =
+			skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+		opcode = (skb->protocol == htons(ETH_P_IPV6)) ?
+				TX_TCP_LSO6 : TX_TCP_LSO;
+		tso = true;
 
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-			netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
-		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
-			netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
-		else
-			return;
+		u8 l4proto;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			l4proto = ip_hdr(skb)->protocol;
+
+			if (l4proto == IPPROTO_TCP)
+				opcode = TX_TCP_PKT;
+			else if(l4proto == IPPROTO_UDP)
+				opcode = TX_UDP_PKT;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			l4proto = ipv6_hdr(skb)->nexthdr;
+
+			if (l4proto == IPPROTO_TCP)
+				opcode = TX_TCPV6_PKT;
+			else if(l4proto == IPPROTO_UDP)
+				opcode = TX_UDPV6_PKT;
+		}
 	}
 	desc->tcp_hdr_offset = skb_transport_offset(skb);
 	desc->ip_hdr_offset = skb_network_offset(skb);
+	netxen_set_tx_flags_opcode(desc, 0, opcode);
+	return tso;
 }
 
 static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1167,33 +1185,20 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct netxen_adapter *adapter = netdev_priv(netdev);
 	struct netxen_hardware_context *hw = &adapter->ahw;
 	unsigned int first_seg_len = skb->len - skb->data_len;
+	struct netxen_cmd_buffer *pbuf;
 	struct netxen_skb_frag *buffrag;
-	unsigned int i;
+	struct cmd_desc_type0 *hwdesc;
+	int i, k;
 
 	u32 producer, consumer;
-	u32 saved_producer = 0;
-	struct cmd_desc_type0 *hwdesc;
-	int k;
-	struct netxen_cmd_buffer *pbuf = NULL;
-	int frag_count;
-	int no_of_desc;
+	int frag_count, no_of_desc;
 	u32 num_txd = adapter->max_tx_desc_count;
+	bool is_tso = false;
 
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
 
 	/* There 4 fragments per descriptor */
 	no_of_desc = (frag_count + 3) >> 2;
-	if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) {
-		if (skb_shinfo(skb)->gso_size > 0) {
-
-			no_of_desc++;
-			if ((ip_hdrlen(skb) + tcp_hdrlen(skb) +
-					sizeof(struct ethhdr)) >
-					(sizeof(struct cmd_desc_type0) - 2)) {
-				no_of_desc++;
-			}
-		}
-	}
 
 	producer = adapter->cmd_producer;
 	smp_mb();
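The descriptor-count math that remains packs up to four DMA fragments into each command descriptor (per the comment above), so the count is simply the fragment count rounded up to the next multiple of four and divided by four. A quick standalone check of that rounding, as plain userspace C rather than driver code:

#include <assert.h>

/* Each netxen command descriptor carries up to 4 buffer fragments,
 * mirroring the driver's (frag_count + 3) >> 2 rounding.
 */
static int descs_for(int frag_count)
{
	return (frag_count + 3) >> 2;
}

int main(void)
{
	assert(descs_for(1) == 1);	/* linear skb, no page frags */
	assert(descs_for(4) == 1);	/* exactly one full descriptor */
	assert(descs_for(5) == 2);	/* spills into a second descriptor */
	return 0;
}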
@@ -1205,34 +1210,22 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	/* Copy the descriptors into the hardware */
-	saved_producer = producer;
 	hwdesc = &hw->cmd_desc_head[producer];
 	memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
 	/* Take skb->data itself */
 	pbuf = &adapter->cmd_buf_arr[producer];
-	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
-			skb_shinfo(skb)->gso_size > 0) {
-		pbuf->mss = skb_shinfo(skb)->gso_size;
-		hwdesc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-	} else {
-		pbuf->mss = 0;
-		hwdesc->mss = 0;
-	}
-	pbuf->total_length = skb->len;
+
+	is_tso = netxen_tso_check(netdev, hwdesc, skb);
+
 	pbuf->skb = skb;
-	pbuf->cmd = TX_ETHER_PKT;
 	pbuf->frag_count = frag_count;
-	pbuf->port = adapter->portnum;
 	buffrag = &pbuf->frag_array[0];
 	buffrag->dma = pci_map_single(adapter->pdev, skb->data, first_seg_len,
 			PCI_DMA_TODEVICE);
 	buffrag->length = first_seg_len;
-	netxen_set_cmd_desc_totallength(hwdesc, skb->len);
-	netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count);
-	netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);
+	netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
+	netxen_set_tx_port(hwdesc, adapter->portnum);
 
-	netxen_set_cmd_desc_port(hwdesc, adapter->portnum);
-	netxen_set_cmd_desc_ctxid(hwdesc, adapter->portnum);
 	hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
 	hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
 
@@ -1285,16 +1278,12 @@
 	}
 	producer = get_next_index(producer, num_txd);
 
-	/* might change opcode to TX_TCP_LSO */
-	netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);
-
 	/* For LSO, we need to copy the MAC/IP/TCP headers into
 	 * the descriptor ring
 	 */
-	if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer])
-			== TX_TCP_LSO) {
+	if (is_tso) {
 		int hdr_len, first_hdr_len, more_hdr;
-		hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
 			first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
 			more_hdr = 1;
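In the is_tso path, hdr_len is now recomputed from the skb (skb_transport_offset() + tcp_hdrlen()) instead of being read back out of the descriptor, and the same threshold decides whether the copied LSO headers fit in the spare bytes of one command descriptor or spill into an extra one. As a rough worked example, assuming struct cmd_desc_type0 is 64 bytes (an assumption here, not stated in this diff), the threshold is 62 bytes: a plain IPv4/TCP header (14 + 20 + 20 = 54) fits, while an IPv6/TCP header (14 + 40 + 20 = 74) sets more_hdr. A small standalone sketch of that arithmetic:

#include <stdio.h>

/* Assumed descriptor size; the driver uses sizeof(struct cmd_desc_type0). */
#define CMD_DESC_SIZE	64

static int needs_more_hdr(int l2_len, int l3_len, int l4_len)
{
	int hdr_len = l2_len + l3_len + l4_len;	/* skb_transport_offset + tcp_hdrlen */

	return hdr_len > (CMD_DESC_SIZE - 2);	/* same test as the driver */
}

int main(void)
{
	printf("IPv4/TCP (54 bytes): %s\n",
	       needs_more_hdr(14, 20, 20) ? "needs extra descriptor" : "fits");
	printf("IPv6/TCP (74 bytes): %s\n",
	       needs_more_hdr(14, 40, 20) ? "needs extra descriptor" : "fits");
	return 0;
}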