author		Hao Zheng <hzheng@nicira.com>	2010-11-11 08:47:59 -0500
committer	David S. Miller <davem@davemloft.net>	2010-11-12 15:30:58 -0500
commit		5e09a10521681fe7808b1c4911a6d9c7fee55f82
tree		96f4e50a9a55ed84a6a5589c30e3c9b192a8f91b /drivers/net
parent		d0d9d8ef5949551276f635cb04969184ba1a9553
ixgbe: Look inside vlan when determining offload protocol.
Currently the skb->protocol field is used to set up various offload
parameters for the correct protocol on transmit. However, if vlan
offloading is disabled or otherwise not used, the protocol field will be
ETH_P_8021Q rather than the actual protocol, so offloading is not
performed correctly even though the hardware is capable of looking
inside vlan tags. Instead, look inside the header if necessary to
determine the correct protocol type.

To some extent this fixes a regression from 2.6.36, because it was
previously not possible to disable vlan offloading, so this error case
was never exposed.

Signed-off-by: Hao Zheng <hzheng@nicira.com>
CC: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
CC: Alex Duyck <alexander.h.duyck@intel.com>
CC: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
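For context: vlan_get_protocol() (include/linux/if_vlan.h), which this
patch threads through the transmit path, returns the encapsulated
EtherType when an 802.1Q header is still present in the packet data,
and skb->protocol otherwise. Below is a minimal userspace sketch of
that look-inside-the-tag logic operating on a raw Ethernet frame
buffer; frame_get_protocol() and the sample frame are illustrative
assumptions for this note, not kernel code, and the sketch omits the
sk_buff handling the real helper does.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100	/* 802.1Q VLAN-tagged frame */
#define ETH_P_IP    0x0800	/* IPv4 */

/*
 * Return the effective EtherType of an Ethernet frame, looking past a
 * single 802.1Q tag if one is present -- the equivalent of what
 * vlan_get_protocol() does for an skb whose tag was not stripped by
 * hardware offload.
 */
static uint16_t frame_get_protocol(const uint8_t *frame, size_t len)
{
	uint16_t ethertype;

	if (len < 14)		/* truncated Ethernet header */
		return 0;
	/* EtherType is big-endian at offset 12 */
	ethertype = (uint16_t)((frame[12] << 8) | frame[13]);

	if (ethertype != ETH_P_8021Q)
		return ethertype;

	/* 802.1Q: TCI occupies bytes 14-15, real EtherType is at 16-17 */
	if (len < 18)
		return 0;
	return (uint16_t)((frame[16] << 8) | frame[17]);
}

int main(void)
{
	/* dst/src MACs zeroed; TPID 0x8100, inner EtherType 0x0800 */
	uint8_t tagged[18] = { [12] = 0x81, [13] = 0x00,
			       [16] = 0x08, [17] = 0x00 };

	/* prints 0x0800 (IPv4), not 0x8100 */
	printf("protocol: 0x%04x\n",
	       frame_get_protocol(tagged, sizeof(tagged)));
	return 0;
}

Without this look-inside step, every tagged frame whose tag was not
offloaded appears to the driver as ETH_P_8021Q, which is exactly the
failure mode the commit message describes.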
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ixgbe/ixgbe_main.c	60
1 file changed, 32 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a..fbad4d81960 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 #ifdef IXGBE_FCOE
 	/* adjust for FCoE Sequence Offload */
 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-	    && (skb->protocol == htons(ETH_P_FCOE)) &&
-	    skb_is_gso(skb)) {
+	    && skb_is_gso(skb)
+	    && vlan_get_protocol(skb) ==
+	       htons(ETH_P_FCOE)) {
 		hlen = skb_transport_offset(skb) +
 			sizeof(struct fc_frame_header) +
 			sizeof(struct fcoe_crc_eof);
@@ -5823,7 +5824,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len)
+		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -5841,7 +5842,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		l4len = tcp_hdrlen(skb);
 		*hdr_len += l4len;
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (protocol == htons(ETH_P_IP)) {
 			struct iphdr *iph = ip_hdr(skb);
 			iph->tot_len = 0;
 			iph->check = 0;
@@ -5880,7 +5881,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
 				   IXGBE_ADVTXD_DTYP_CTXT);
 
-		if (skb->protocol == htons(ETH_P_IP))
+		if (protocol == htons(ETH_P_IP))
 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +5907,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
 	return false;
 }
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+		      __be16 protocol)
 {
 	u32 rtn = 0;
-	__be16 protocol;
-
-	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-		protocol = ((const struct vlan_ethhdr *)skb->data)->
-			   h_vlan_encapsulated_proto;
-	else
-		protocol = skb->protocol;
 
 	switch (protocol) {
 	case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +5938,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 	default:
 		if (unlikely(net_ratelimit()))
 			e_warn(probe, "partial checksum but proto=%x!\n",
-			       skb->protocol);
+			       protocol);
 		break;
 	}
 
@@ -5952,7 +5947,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags)
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -5981,7 +5977,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 				    IXGBE_ADVTXD_DTYP_CTXT);
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 		/* use index zero for tx checksum offload */
@@ -6179,7 +6175,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      int queue, u32 tx_flags)
+		      int queue, u32 tx_flags, __be16 protocol)
 {
 	struct ixgbe_atr_input atr_input;
 	struct tcphdr *th;
@@ -6190,7 +6186,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 	u8 l4type = 0;
 
 	/* Right now, we support IPv4 only */
-	if (skb->protocol != htons(ETH_P_IP))
+	if (protocol != htons(ETH_P_IP))
 		return;
 	/* check if we're UDP or TCP */
 	if (iph->protocol == IPPROTO_TCP) {
@@ -6257,10 +6253,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	int txq = smp_processor_id();
-
 #ifdef IXGBE_FCOE
-	if ((skb->protocol == htons(ETH_P_FCOE)) ||
-	    (skb->protocol == htons(ETH_P_FIP))) {
+	__be16 protocol;
+
+	protocol = vlan_get_protocol(skb);
+
+	if ((protocol == htons(ETH_P_FCOE)) ||
+	    (protocol == htons(ETH_P_FIP))) {
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 			txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
 			txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6303,6 +6302,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 	int tso;
 	int count = 0;
 	unsigned int f;
+	__be16 protocol;
+
+	protocol = vlan_get_protocol(skb);
 
 	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6325,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 	/* for FCoE with DCB, we force the priority to what
 	 * was specified by the switch */
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-	    (skb->protocol == htons(ETH_P_FCOE) ||
-	     skb->protocol == htons(ETH_P_FIP))) {
+	    (protocol == htons(ETH_P_FCOE) ||
+	     protocol == htons(ETH_P_FIP))) {
 #ifdef CONFIG_IXGBE_DCB
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6336,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		}
 #endif
 		/* flag for FCoE offloads */
-		if (skb->protocol == htons(ETH_P_FCOE))
+		if (protocol == htons(ETH_P_FCOE))
 			tx_flags |= IXGBE_TX_FLAGS_FCOE;
 	}
 #endif
@@ -6368,9 +6370,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
-		if (skb->protocol == htons(ETH_P_IP))
+		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+				protocol);
 		if (tso < 0) {
 			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
@@ -6378,7 +6381,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 
 	if (tso)
 		tx_flags |= IXGBE_TX_FLAGS_TSO;
-	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+			       protocol) &&
 		 (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
@@ -6392,7 +6396,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		    test_bit(__IXGBE_FDIR_INIT_DONE,
 			     &tx_ring->reinit_state)) {
 			ixgbe_atr(adapter, skb, tx_ring->queue_index,
-				  tx_flags);
+				  tx_flags, protocol);
 			tx_ring->atr_count = 0;
 		}
 	}