author | Emil Tantilov <emil.s.tantilov@intel.com> | 2014-01-17 21:30:05 -0500
committer | David S. Miller <davem@davemloft.net> | 2014-01-17 22:15:10 -0500
commit | 29d37fa162af3ba70229326f02831e24dcba64eb (patch)
tree | 5c141ddec63c0ad7bef6656d92c1bb68a83e3f74 /drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
parent | 9bdfefd21afdd6efcc40aa009fb0f97c4179a2a5 (diff)
ixgbevf: merge ixgbevf_tx_map and ixgbevf_tx_queue into a single function
This change merges the ixgbevf_tx_map call and the ixgbevf_tx_queue call
into a single function. In order to make room for this, the setting of the
cmd_type and olinfo flags is done in separate functions.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
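
For orientation, here is a minimal, self-contained sketch of the restructuring the commit message describes: the descriptor command-type and olinfo flags are built by small helpers, and a single routine then maps and queues the buffers in one pass. All names, flag values, and bit positions below are placeholders chosen for illustration; they are not the driver's real definitions, and the sketch deliberately omits the DMA mapping, ring wrap-around, and error unwinding that the actual ixgbevf_tx_map in the diff handles.

```c
/* Simplified sketch only: placeholder flag values and a trimmed descriptor
 * type stand in for the driver's definitions so the shape of the merged
 * TX path is visible.
 */
#include <stdint.h>
#include <stdio.h>

#define TX_FLAG_VLAN  0x1u
#define TX_FLAG_TSO   0x2u
#define TX_FLAG_CSUM  0x4u

#define CMD_DTYP_DATA 0x01u
#define CMD_IFCS      0x02u
#define CMD_DEXT      0x04u
#define CMD_VLE       0x08u
#define CMD_TSE       0x10u
#define CMD_EOP_RS    0x20u   /* stands in for the EOP/RS close-out bits */

struct adv_tx_desc {
	uint64_t buffer_addr;
	uint32_t cmd_type_len;
	uint32_t olinfo_status;
};

/* build the per-packet command type once (previously inlined in the queue path) */
static uint32_t tx_cmd_type(uint32_t tx_flags)
{
	uint32_t cmd_type = CMD_DTYP_DATA | CMD_IFCS | CMD_DEXT;

	if (tx_flags & TX_FLAG_VLAN)
		cmd_type |= CMD_VLE;
	if (tx_flags & TX_FLAG_TSO)
		cmd_type |= CMD_TSE;
	return cmd_type;
}

/* build olinfo_status for the first descriptor of the packet */
static uint32_t tx_olinfo_status(uint32_t tx_flags, unsigned int paylen)
{
	uint32_t olinfo = (uint32_t)paylen << 14;   /* placeholder payload-length shift */

	if (tx_flags & TX_FLAG_CSUM)
		olinfo |= 0x100;                    /* placeholder L4 checksum bit */
	return olinfo;
}

/* single pass: fill one descriptor per buffer, close the chain with EOP/RS */
static void tx_map_and_queue(struct adv_tx_desc *ring, unsigned int nbufs,
			     const uint64_t *dma, const uint32_t *len,
			     uint32_t tx_flags, unsigned int paylen)
{
	uint32_t cmd_type = tx_cmd_type(tx_flags);
	unsigned int i;

	ring[0].olinfo_status = tx_olinfo_status(tx_flags, paylen);
	for (i = 0; i < nbufs; i++) {
		ring[i].buffer_addr = dma[i];
		ring[i].cmd_type_len = cmd_type | len[i];
	}
	ring[nbufs - 1].cmd_type_len |= CMD_EOP_RS;
}

int main(void)
{
	static struct adv_tx_desc ring[2];          /* zero-initialized toy ring */
	uint64_t dma[2] = { 0x1000, 0x2000 };
	uint32_t len[2] = { 64, 1400 };

	tx_map_and_queue(ring, 2, dma, len, TX_FLAG_CSUM, 1464);
	printf("last cmd_type_len: 0x%x\n", (unsigned)ring[1].cmd_type_len);
	return 0;
}
```

The point of the split mirrors the commit: flag computation is isolated in helpers so that mapping and queueing can happen in one walk over the buffers instead of two separate passes.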
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 272
1 file changed, 132 insertions(+), 140 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d6d06adfbc48..9df28985eba7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -233,8 +233,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 		/* unmap remaining buffers */
 		while (tx_desc != eop_desc) {
-			tx_desc->wb.status = 0;
-
 			tx_buffer++;
 			tx_desc++;
 			i++;
@@ -254,8 +252,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 			}
 		}
 
-		tx_desc->wb.status = 0;
-
 		/* move us one more past the eop_desc for start of next pkt */
 		tx_buffer++;
 		tx_desc++;
@@ -2915,166 +2911,171 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    type_tucmd, mss_l4len_idx);
 }
 
-static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
-			  struct ixgbevf_tx_buffer *first)
-{
-	dma_addr_t dma;
-	struct sk_buff *skb = first->skb;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned int len;
-	unsigned int total = skb->len;
-	unsigned int offset = 0, size;
-	int count = 0;
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned int f;
-	int i;
-
-	i = tx_ring->next_to_use;
-
-	len = min(skb_headlen(skb), total);
-	while (len) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
-		tx_buffer_info->tx_flags = first->tx_flags;
-		dma = dma_map_single(tx_ring->dev, skb->data + offset,
-				     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
-
-		/* record length, and DMA address */
-		dma_unmap_len_set(tx_buffer_info, len, size);
-		dma_unmap_addr_set(tx_buffer_info, dma, dma);
-
-		len -= size;
-		total -= size;
-		offset += size;
-		count++;
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
-
-	for (f = 0; f < nr_frags; f++) {
-		const struct skb_frag_struct *frag;
-
-		frag = &skb_shinfo(skb)->frags[f];
-		len = min((unsigned int)skb_frag_size(frag), total);
-		offset = 0;
-
-		while (len) {
-			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
-
-			dma = skb_frag_dma_map(tx_ring->dev, frag,
-					       offset, size, DMA_TO_DEVICE);
-			if (dma_mapping_error(tx_ring->dev, dma))
-				goto dma_error;
-
-			/* record length, and DMA address */
-			dma_unmap_len_set(tx_buffer_info, len, size);
-			dma_unmap_addr_set(tx_buffer_info, dma, dma);
-
-			len -= size;
-			total -= size;
-			offset += size;
-			count++;
-			i++;
-			if (i == tx_ring->count)
-				i = 0;
-		}
-		if (total == 0)
-			break;
-	}
-
-	if (i == 0)
-		i = tx_ring->count - 1;
-	else
-		i = i - 1;
-
-	first->next_to_watch = IXGBEVF_TX_DESC(tx_ring, i);
-	first->time_stamp = jiffies;
-
-	return count;
-
-dma_error:
-	dev_err(tx_ring->dev, "TX DMA map failed\n");
-
-	/* clear timestamp and dma mappings for failed tx_buffer_info map */
-	tx_buffer_info->dma = 0;
-	count--;
-
-	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count >= 0) {
-		count--;
-		i--;
-		if (i < 0)
-			i += tx_ring->count;
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-	}
-
-	return count;
-}
-
-static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring,
-			     struct ixgbevf_tx_buffer *first,
-			     int count, u8 hdr_len)
-{
-	union ixgbe_adv_tx_desc *tx_desc = NULL;
-	struct sk_buff *skb = first->skb;
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	u32 olinfo_status = 0, cmd_type_len = 0;
-	u32 tx_flags = first->tx_flags;
-	unsigned int i;
-
-	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
-
-	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
-
-	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
-
-	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
-
-	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
-		olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
-
-	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
-		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-
-		/* use index 1 context for tso */
-		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
-		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
-			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-	}
-
-	/*
-	 * Check Context must be set if Tx switch is enabled, which it
-	 * always is for case where virtual functions are running
-	 */
-	olinfo_status |= IXGBE_ADVTXD_CC;
-
-	olinfo_status |= ((skb->len - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
-
-	i = tx_ring->next_to_use;
-	while (count--) {
-		dma_addr_t dma;
-		unsigned int len;
-
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		dma = dma_unmap_addr(tx_buffer_info, dma);
-		len = dma_unmap_len(tx_buffer_info, len);
-		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | len);
-		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-		i++;
-		if (i == tx_ring->count)
-			i = 0;
-	}
-
-	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
+{
+	/* set type for advanced descriptor with frame checksum insertion */
+	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
+				      IXGBE_ADVTXD_DCMD_IFCS |
+				      IXGBE_ADVTXD_DCMD_DEXT);
+
+	/* set HW vlan bit if vlan is present */
+	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
+
+	/* set segmentation enable bits for TSO/FSO */
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+
+	return cmd_type;
+}
+
+static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
+				     u32 tx_flags, unsigned int paylen)
+{
+	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+	/* enable L4 checksum for TSO and TX checksum offload */
+	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+
+	/* enble IPv4 checksum for TSO */
+	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
+
+	/* use index 1 context for TSO/FSO/FCOE */
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+
+	/* Check Context must be set if Tx switch is enabled, which it
+	 * always is for case where virtual functions are running
+	 */
+	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+
+	tx_desc->read.olinfo_status = olinfo_status;
+}
+
+static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
+			   struct ixgbevf_tx_buffer *first,
+			   const u8 hdr_len)
+{
+	dma_addr_t dma;
+	struct sk_buff *skb = first->skb;
+	struct ixgbevf_tx_buffer *tx_buffer;
+	union ixgbe_adv_tx_desc *tx_desc;
+	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int data_len = skb->data_len;
+	unsigned int size = skb_headlen(skb);
+	unsigned int paylen = skb->len - hdr_len;
+	u32 tx_flags = first->tx_flags;
+	__le32 cmd_type;
+	u16 i = tx_ring->next_to_use;
+
+	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
+	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+
+	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(tx_ring->dev, dma))
+		goto dma_error;
+
+	/* record length, and DMA address */
+	dma_unmap_len_set(first, len, size);
+	dma_unmap_addr_set(first, dma, dma);
+
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+	for (;;) {
+		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
+			tx_desc->read.cmd_type_len =
+				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+
+			i++;
+			tx_desc++;
+			if (i == tx_ring->count) {
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+				i = 0;
+			}
+
+			dma += IXGBE_MAX_DATA_PER_TXD;
+			size -= IXGBE_MAX_DATA_PER_TXD;
+
+			tx_desc->read.buffer_addr = cpu_to_le64(dma);
+			tx_desc->read.olinfo_status = 0;
+		}
+
+		if (likely(!data_len))
+			break;
+
+		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+
+		i++;
+		tx_desc++;
+		if (i == tx_ring->count) {
+			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			i = 0;
+		}
+
+		size = skb_frag_size(frag);
+		data_len -= size;
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_desc->read.olinfo_status = 0;
+
+		frag++;
+	}
+
+	/* write last descriptor with RS and EOP bits */
+	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
+	tx_desc->read.cmd_type_len = cmd_type;
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier (wmb) to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+
+	tx_ring->next_to_use = i;
+
+	/* notify HW of packet */
+	writel(i, tx_ring->tail);
+
+	return;
+dma_error:
+	dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i = tx_ring->count;
+		i--;
+	}
 
 	tx_ring->next_to_use = i;
 }
@@ -3167,17 +3168,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else
 		ixgbevf_tx_csum(tx_ring, first);
 
-	ixgbevf_tx_queue(tx_ring, first,
-			 ixgbevf_tx_map(tx_ring, first), hdr_len);
-
-	/* Force memory writes to complete before letting h/w
-	 * know there are new descriptors to fetch.  (Only
-	 * applicable for weak-ordered memory model archs,
-	 * such as IA-64).
-	 */
-	wmb();
+	ixgbevf_tx_map(tx_ring, first, hdr_len);
 
-	writel(tx_ring->next_to_use, tx_ring->tail);
 	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	return NETDEV_TX_OK;