author		Alexander Duyck <alexander.h.duyck@intel.com>	2011-08-26 03:44:32 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-10-07 01:59:27 -0400
commit		7d13a7d0da74d127457cc6f88e47fd8e85960a13 (patch)
tree		89766c0ade904cda76c1307455ab9f8986d4057d /drivers/net/ethernet
parent		0603464956e863810af60c08b4b2e8ab50363a54 (diff)
igb: Consolidate creation of Tx context descriptors into a single function
This patch is meant to simplify the transmit path by reducing the overhead of creating a transmit context descriptor. The current implementation is split between igb_tso and igb_tx_csum, each with its own way of setting up the tx_buffer_info structure and the tx_desc. By combining them it is possible to reduce code and simplify things, since only one function now creates context descriptors.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
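For illustration only (a sketch, not code from the commit): the heart of the change is that igb_tso() and igb_tx_csum() no longer fill a context descriptor by hand; each computes its descriptor words and hands them to the new igb_tx_ctxtdesc() helper, which owns the descriptor layout. The self-contained C model below shows that call pattern; the struct, the helper name ctx_desc_write() and the numeric values are invented stand-ins, not the driver's real types or register encodings.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the four 32-bit words of an advanced Tx context descriptor. */
struct ctx_desc {
	uint32_t vlan_macip_lens;
	uint32_t seqnum_seed;
	uint32_t type_tucmd_mlhl;
	uint32_t mss_l4len_idx;
};

/*
 * Single write-out helper, modeled on igb_tx_ctxtdesc() in the diff below:
 * every caller passes its three computed words, and only this function
 * touches the descriptor fields (the real helper also ORs in the DEXT/CTXT
 * type bits and the per-ring context index for 82575 here).
 */
static void ctx_desc_write(struct ctx_desc *desc, uint32_t vlan_macip_lens,
			   uint32_t type_tucmd, uint32_t mss_l4len_idx)
{
	desc->vlan_macip_lens = vlan_macip_lens;
	desc->seqnum_seed = 0;
	desc->type_tucmd_mlhl = type_tucmd;
	desc->mss_l4len_idx = mss_l4len_idx;
}

int main(void)
{
	struct ctx_desc desc;

	/* TSO path: compute the words, then call the shared helper ...        */
	ctx_desc_write(&desc, 0x0e, 0x01, 0x20);	/* arbitrary example values */

	/* ... and the checksum-offload path does the same with its own words. */
	ctx_desc_write(&desc, 0x0e, 0x02, 0x00);

	printf("type_tucmd_mlhl = 0x%08x\n", desc.type_tucmd_mlhl);
	return 0;
}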
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/intel/igb/igb_main.c	233
1 file changed, 106 insertions(+), 127 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2bdc78368b64..a0bb81d9ef1b 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -45,6 +45,9 @@
 #include <linux/pci-aspm.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
 #include <linux/if_ether.h>
 #include <linux/aer.h>
 #include <linux/prefetch.h>
@@ -3960,16 +3963,39 @@ set_itr_now:
 #define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT	16
 
+void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
+		     u32 type_tucmd, u32 mss_l4len_idx)
+{
+	struct e1000_adv_tx_context_desc *context_desc;
+	u16 i = tx_ring->next_to_use;
+
+	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
+
+	i++;
+	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
+
+	/* set bits to identify this as an advanced context descriptor */
+	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+
+	/* For 82575, context index must be unique per ring. */
+	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+		mss_l4len_idx |= tx_ring->reg_idx << 4;
+
+	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+	context_desc->seqnum_seed = 0;
+	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
+
 static inline int igb_tso(struct igb_ring *tx_ring,
 			  struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
-	struct e1000_adv_tx_context_desc *context_desc;
-	unsigned int i;
 	int err;
-	struct igb_tx_buffer *buffer_info;
-	u32 info = 0, tu_cmd = 0;
-	u32 mss_l4len_idx;
-	u8 l4len;
+	u32 vlan_macip_lens, type_tucmd;
+	u32 mss_l4len_idx, l4len;
+
+	if (!skb_is_gso(skb))
+		return 0;
 
 	if (skb_header_cloned(skb)) {
 		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
@@ -3977,8 +4003,8 @@ static inline int igb_tso(struct igb_ring *tx_ring,
 			return err;
 	}
 
-	l4len = tcp_hdrlen(skb);
-	*hdr_len += l4len;
+	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
 
 	if (skb->protocol == htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);
@@ -3988,6 +4014,7 @@ static inline int igb_tso(struct igb_ring *tx_ring,
 							 iph->daddr, 0,
 							 IPPROTO_TCP,
 							 0);
+		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
 	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -3995,131 +4022,85 @@ static inline int igb_tso(struct igb_ring *tx_ring,
 						       0, IPPROTO_TCP, 0);
 	}
 
-	i = tx_ring->next_to_use;
-
-	buffer_info = &tx_ring->tx_buffer_info[i];
-	context_desc = IGB_TX_CTXTDESC(tx_ring, i);
-	/* VLAN MACLEN IPLEN */
-	if (tx_flags & IGB_TX_FLAGS_VLAN)
-		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
-	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
-	*hdr_len += skb_network_offset(skb);
-	info |= skb_network_header_len(skb);
-	*hdr_len += skb_network_header_len(skb);
-	context_desc->vlan_macip_lens = cpu_to_le32(info);
-
-	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
-
-	if (skb->protocol == htons(ETH_P_IP))
-		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
-	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
-
-	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+	l4len = tcp_hdrlen(skb);
+	*hdr_len = skb_transport_offset(skb) + l4len;
 
 	/* MSS L4LEN IDX */
-	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
-	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
 
-	/* For 82575, context index must be unique per ring. */
-	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
-		mss_l4len_idx |= tx_ring->reg_idx << 4;
-
-	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-	context_desc->seqnum_seed = 0;
-
-	buffer_info->time_stamp = jiffies;
-	buffer_info->next_to_watch = i;
-	buffer_info->dma = 0;
-	i++;
-	if (i == tx_ring->count)
-		i = 0;
+	/* VLAN MACLEN IPLEN */
+	vlan_macip_lens = skb_network_header_len(skb);
+	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IGB_TX_FLAGS_VLAN_MASK;
 
-	tx_ring->next_to_use = i;
+	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
-	return true;
+	return 1;
 }
 
 static inline bool igb_tx_csum(struct igb_ring *tx_ring,
 			       struct sk_buff *skb, u32 tx_flags)
 {
-	struct e1000_adv_tx_context_desc *context_desc;
-	struct device *dev = tx_ring->dev;
-	struct igb_tx_buffer *buffer_info;
-	u32 info = 0, tu_cmd = 0;
-	unsigned int i;
-
-	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
-		i = tx_ring->next_to_use;
-		buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IGB_TX_CTXTDESC(tx_ring, i);
-
-		if (tx_flags & IGB_TX_FLAGS_VLAN)
-			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
-
-		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
-		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			info |= skb_network_header_len(skb);
-
-		context_desc->vlan_macip_lens = cpu_to_le32(info);
-
-		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
-
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			__be16 protocol;
+	u32 vlan_macip_lens = 0;
+	u32 mss_l4len_idx = 0;
+	u32 type_tucmd = 0;
 
-			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
-				const struct vlan_ethhdr *vhdr =
-					(const struct vlan_ethhdr*)skb->data;
-
-				protocol = vhdr->h_vlan_encapsulated_proto;
-			} else {
-				protocol = skb->protocol;
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		if (!(tx_flags & IGB_TX_FLAGS_VLAN))
+			return false;
+	} else {
+		u8 l4_hdr = 0;
+		switch (skb->protocol) {
+		case __constant_htons(ETH_P_IP):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+			l4_hdr = ip_hdr(skb)->protocol;
+			break;
+		case __constant_htons(ETH_P_IPV6):
+			vlan_macip_lens |= skb_network_header_len(skb);
+			l4_hdr = ipv6_hdr(skb)->nexthdr;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but proto=%x!\n",
+					 skb->protocol);
 			}
+			break;
+		}
 
-			switch (protocol) {
-			case cpu_to_be16(ETH_P_IP):
-				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
-				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
-				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
-					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
-				break;
-			case cpu_to_be16(ETH_P_IPV6):
-				/* XXX what about other V6 headers?? */
-				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
-				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
-					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
-				break;
-			default:
-				if (unlikely(net_ratelimit()))
-					dev_warn(dev,
-						 "partial checksum but proto=%x!\n",
-						 skb->protocol);
-				break;
+		switch (l4_hdr) {
+		case IPPROTO_TCP:
+			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+			mss_l4len_idx = tcp_hdrlen(skb) <<
+					E1000_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_SCTP:
+			type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
+			mss_l4len_idx = sizeof(struct sctphdr) <<
+					E1000_ADVTXD_L4LEN_SHIFT;
+			break;
+		case IPPROTO_UDP:
+			mss_l4len_idx = sizeof(struct udphdr) <<
+					E1000_ADVTXD_L4LEN_SHIFT;
+			break;
+		default:
+			if (unlikely(net_ratelimit())) {
+				dev_warn(tx_ring->dev,
+					 "partial checksum but l4 proto=%x!\n",
+					 l4_hdr);
 			}
+			break;
 		}
+	}
 
-		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
-		context_desc->seqnum_seed = 0;
-		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
-			context_desc->mss_l4len_idx =
-				cpu_to_le32(tx_ring->reg_idx << 4);
+	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+	vlan_macip_lens |= tx_flags & IGB_TX_FLAGS_VLAN_MASK;
 
-		buffer_info->time_stamp = jiffies;
-		buffer_info->next_to_watch = i;
-		buffer_info->dma = 0;
+	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-		tx_ring->next_to_use = i;
-
-		return true;
-	}
-	return false;
+	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
 #define IGB_MAX_TXD_PWR	16
@@ -4140,8 +4121,6 @@ static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
 	buffer_info = &tx_ring->tx_buffer_info[i];
 	BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
 	buffer_info->length = hlen;
-	/* set time_stamp *before* dma to help avoid a possible race */
-	buffer_info->time_stamp = jiffies;
 	buffer_info->next_to_watch = i;
 	buffer_info->dma = dma_map_single(dev, skb->data, hlen,
 					  DMA_TO_DEVICE);
@@ -4160,7 +4139,6 @@ static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
 		buffer_info = &tx_ring->tx_buffer_info[i];
 		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
 		buffer_info->length = len;
-		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
 		buffer_info->mapped_as_page = true;
 		buffer_info->dma = skb_frag_dma_map(dev, frag, 0, len,
@@ -4176,6 +4154,7 @@ static inline int igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
 	buffer_info->bytecount = ((gso_segs - 1) * hlen) + skb->len;
 	buffer_info->gso_segs = gso_segs;
 	tx_ring->tx_buffer_info[first].next_to_watch = i;
+	tx_ring->tx_buffer_info[first].time_stamp = jiffies;
 
 	return ++count;
 
@@ -4304,7 +4283,7 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 				struct igb_ring *tx_ring)
 {
-	int tso = 0, count;
+	int tso, count;
 	u32 tx_flags = 0;
 	u16 first;
 	u8 hdr_len = 0;
@@ -4333,16 +4312,12 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 		tx_flags |= IGB_TX_FLAGS_IPV4;
 
 	first = tx_ring->next_to_use;
-	if (skb_is_gso(skb)) {
-		tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);
 
-		if (tso < 0) {
-			dev_kfree_skb_any(skb);
-			return NETDEV_TX_OK;
-		}
-	}
+	tso = igb_tso(tx_ring, skb, tx_flags, &hdr_len);
 
-	if (tso)
+	if (tso < 0)
+		goto out_drop;
+	else if (tso)
 		tx_flags |= IGB_TX_FLAGS_TSO;
 	else if (igb_tx_csum(tx_ring, skb, tx_flags) &&
 		 (skb->ip_summed == CHECKSUM_PARTIAL))
@@ -4366,6 +4341,10 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
 
 	return NETDEV_TX_OK;
+
+out_drop:
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,