author    | Anirban Chakraborty <anirban.chakraborty@qlogic.com> | 2011-04-01 10:28:11 -0400
committer | David S. Miller <davem@davemloft.net>                | 2011-04-06 15:47:11 -0400
commit    | 036d61f05189c9c02de22dd19a1c64a4fd74a914 (patch)
tree      | 412e39ed4e70f69c1b032a29ce28da805aa97ba6 /drivers
parent    | b1fc6d3cfaff6fefd838b84532cb356f8a80da7b (diff)
qlcnic: Code optimization patch
The optimized code achieves lower CPU utilization on the transmit path
and higher throughput for small packet sizes (64 bytes).
Signed-off-by: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')

-rw-r--r-- | drivers/net/qlcnic/qlcnic.h      |  30
-rw-r--r-- | drivers/net/qlcnic/qlcnic_main.c | 210

2 files changed, 115 insertions, 125 deletions
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 15d950a4f46d..a5b28d1475b1 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -434,50 +434,49 @@ struct qlcnic_adapter_stats {
  * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
  */
 struct qlcnic_host_rds_ring {
-	u32 producer;
+	void __iomem *crb_rcv_producer;
+	struct rcv_desc *desc_head;
+	struct qlcnic_rx_buffer *rx_buf_arr;
 	u32 num_desc;
+	u32 producer;
 	u32 dma_size;
 	u32 skb_size;
 	u32 flags;
-	void __iomem *crb_rcv_producer;
-	struct rcv_desc *desc_head;
-	struct qlcnic_rx_buffer *rx_buf_arr;
 	struct list_head free_list;
 	spinlock_t lock;
 	dma_addr_t phys_addr;
-};
+} ____cacheline_internodealigned_in_smp;
 
 struct qlcnic_host_sds_ring {
 	u32 consumer;
 	u32 num_desc;
 	void __iomem *crb_sts_consumer;
-	void __iomem *crb_intr_mask;
 
 	struct status_desc *desc_head;
 	struct qlcnic_adapter *adapter;
 	struct napi_struct napi;
 	struct list_head free_list[NUM_RCV_DESC_RINGS];
 
+	void __iomem *crb_intr_mask;
 	int irq;
 
 	dma_addr_t phys_addr;
 	char name[IFNAMSIZ+4];
-};
+} ____cacheline_internodealigned_in_smp;
 
 struct qlcnic_host_tx_ring {
 	u32 producer;
-	__le32 *hw_consumer;
 	u32 sw_consumer;
-	void __iomem *crb_cmd_producer;
 	u32 num_desc;
-
-	struct netdev_queue *txq;
-
-	struct qlcnic_cmd_buffer *cmd_buf_arr;
+	void __iomem *crb_cmd_producer;
 	struct cmd_desc_type0 *desc_head;
+	struct qlcnic_cmd_buffer *cmd_buf_arr;
+	__le32 *hw_consumer;
+
 	dma_addr_t phys_addr;
 	dma_addr_t hw_cons_phys_addr;
-};
+	struct netdev_queue *txq;
+} ____cacheline_internodealigned_in_smp;
 
 /*
  * Receive context. There is one such structure per instance of the
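Worth noting in the hunk above: the per-packet hot fields (indices, descriptor and buffer pointers) are regrouped at the front of each ring structure, and every ring is padded out to its own cacheline with `____cacheline_internodealigned_in_smp`, so rings serviced by different CPUs stop false-sharing. A minimal sketch of the idiom, using a hypothetical `demo_ring` rather than the driver's structs:

```c
#include <linux/cache.h>	/* ____cacheline_internodealigned_in_smp */
#include <linux/types.h>

/* Hot fields first, so the per-packet accesses touch as few
 * cachelines as possible; cold setup/teardown fields last. The
 * trailing attribute pads the struct to a cacheline boundary, so
 * an array of rings places each ring on its own line. */
struct demo_ring {
	u32 producer;			/* fast path: bumped per packet */
	u32 num_desc;			/* fast path: read-mostly */
	void __iomem *doorbell;		/* fast path: MMIO kick */
	dma_addr_t phys_addr;		/* cold: set once at init */
} ____cacheline_internodealigned_in_smp;
```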
@@ -1328,8 +1327,7 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
 
 static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
 {
-	smp_mb();
-	if (tx_ring->producer < tx_ring->sw_consumer)
+	if (likely(tx_ring->producer < tx_ring->sw_consumer))
 		return tx_ring->sw_consumer - tx_ring->producer;
 	else
 		return tx_ring->sw_consumer + tx_ring->num_desc -
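`qlcnic_tx_avail()` is pure ring arithmetic: two free-running indices modulo `num_desc`, with the first branch covering the case where the producer has wrapped past the end of the ring. The same computation restated as a standalone function with hypothetical names (when the indices are equal the ring counts as empty; the driver stops the queue at `TX_STOP_THRESH` before it can actually fill):

```c
#include <stdint.h>

/* Free slots in a circular ring of num_desc entries. The producer
 * index chases the consumer; once it wraps around the end of the
 * ring it is numerically smaller than the consumer. */
static uint32_t ring_avail(uint32_t producer, uint32_t consumer,
			   uint32_t num_desc)
{
	if (producer < consumer)	/* producer has wrapped */
		return consumer - producer;
	return consumer + num_desc - producer;
}
```

For example, `ring_avail(2, 5, 8) == 3` (wrapped) and `ring_avail(5, 2, 8) == 5`.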
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index dde7e4403830..3b740f55ca42 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1861,6 +1861,7 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
 	vlan_req->vlan_id = vlan_id;
 
 	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+	smp_mb();
 }
 
 #define QLCNIC_MAC_HASH(MAC)\
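This one-line hunk is half of the barrier relocation: the old code ran `smp_mb()` inside `qlcnic_tx_avail()` on every availability check, while the patch fences once, right after each producer (or consumer) index is published. Roughly, using the patch's own names (a sketch of the pattern, not a literal excerpt):

```c
/* Publish the new producer index, then fence: the store must be
 * globally visible before this CPU later reads sw_consumer in
 * qlcnic_tx_avail(). The completion path makes the mirror-image
 * update of sw_consumer under the same discipline, so the two
 * sides cannot both miss each other's progress. */
tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
smp_mb();
```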
@@ -1921,58 +1922,122 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
 	spin_unlock(&adapter->mac_learn_lock);
 }
 
-static void
-qlcnic_tso_check(struct net_device *netdev,
-		struct qlcnic_host_tx_ring *tx_ring,
+static int
+qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
 		struct cmd_desc_type0 *first_desc,
 		struct sk_buff *skb)
 {
-	u8 opcode = TX_ETHER_PKT;
-	__be16 protocol = skb->protocol;
-	u16 flags = 0;
-	int copied, offset, copy_len, hdr_len = 0, tso = 0;
+	u8 opcode = 0, hdr_len = 0;
+	u16 flags = 0, vlan_tci = 0;
+	int copied, offset, copy_len;
 	struct cmd_desc_type0 *hwdesc;
 	struct vlan_ethhdr *vh;
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+	u16 protocol = ntohs(skb->protocol);
 	u32 producer = tx_ring->producer;
-	__le16 vlan_oob = first_desc->flags_opcode &
-				cpu_to_le16(FLAGS_VLAN_OOB);
+
+	if (protocol == ETH_P_8021Q) {
+		vh = (struct vlan_ethhdr *)skb->data;
+		flags = FLAGS_VLAN_TAGGED;
+		vlan_tci = vh->h_vlan_TCI;
+	} else if (vlan_tx_tag_present(skb)) {
+		flags = FLAGS_VLAN_OOB;
+		vlan_tci = vlan_tx_tag_get(skb);
+	}
+	if (unlikely(adapter->pvid)) {
+		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+			return -EIO;
+		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+			goto set_flags;
+
+		flags = FLAGS_VLAN_OOB;
+		vlan_tci = adapter->pvid;
+	}
+set_flags:
+	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
 
 	if (*(skb->data) & BIT_0) {
 		flags |= BIT_0;
 		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
 	}
-
-	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+	opcode = TX_ETHER_PKT;
+	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
 			skb_shinfo(skb)->gso_size > 0) {
 
 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 
 		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 		first_desc->total_hdr_length = hdr_len;
-		if (vlan_oob) {
+
+		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+
+		/* For LSO, we need to copy the MAC/IP/TCP headers into
+		* the descriptor ring */
+		copied = 0;
+		offset = 2;
+
+		if (flags & FLAGS_VLAN_OOB) {
 			first_desc->total_hdr_length += VLAN_HLEN;
 			first_desc->tcp_hdr_offset = VLAN_HLEN;
 			first_desc->ip_hdr_offset = VLAN_HLEN;
 			/* Only in case of TSO on vlan device */
 			flags |= FLAGS_VLAN_TAGGED;
+
+			/* Create a TSO vlan header template for firmware */
+
+			hwdesc = &tx_ring->desc_head[producer];
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+			copy_len = min((int)sizeof(struct cmd_desc_type0) -
+				offset, hdr_len + VLAN_HLEN);
+
+			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+			skb_copy_from_linear_data(skb, vh, 12);
+			vh->h_vlan_proto = htons(ETH_P_8021Q);
+			vh->h_vlan_TCI = htons(vlan_tci);
+
+			skb_copy_from_linear_data_offset(skb, 12,
+				(char *)vh + 16, copy_len - 16);
+
+			copied = copy_len - VLAN_HLEN;
+			offset = 0;
+
+			producer = get_next_index(producer, tx_ring->num_desc);
 		}
 
-		opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
-				TX_TCP_LSO6 : TX_TCP_LSO;
-		tso = 1;
+		while (copied < hdr_len) {
+
+			copy_len = min((int)sizeof(struct cmd_desc_type0) -
+				offset, (hdr_len - copied));
+
+			hwdesc = &tx_ring->desc_head[producer];
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+			skb_copy_from_linear_data_offset(skb, copied,
+				(char *) hwdesc + offset, copy_len);
+
+			copied += copy_len;
+			offset = 0;
+
+			producer = get_next_index(producer, tx_ring->num_desc);
+		}
+
+		tx_ring->producer = producer;
+		smp_mb();
+		adapter->stats.lso_frames++;
 
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		u8 l4proto;
 
-		if (protocol == cpu_to_be16(ETH_P_IP)) {
+		if (protocol == ETH_P_IP) {
 			l4proto = ip_hdr(skb)->protocol;
 
 			if (l4proto == IPPROTO_TCP)
 				opcode = TX_TCP_PKT;
 			else if (l4proto == IPPROTO_UDP)
 				opcode = TX_UDP_PKT;
-		} else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+		} else if (protocol == ETH_P_IPV6) {
 			l4proto = ipv6_hdr(skb)->nexthdr;
 
 			if (l4proto == IPPROTO_TCP)
@@ -1981,63 +2046,11 @@ qlcnic_tso_check(struct net_device *netdev,
 				opcode = TX_UDPV6_PKT;
 		}
 	}
-
 	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
 	first_desc->ip_hdr_offset += skb_network_offset(skb);
 	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
 
-	if (!tso)
-		return;
-
-	/* For LSO, we need to copy the MAC/IP/TCP headers into
-	 * the descriptor ring
-	 */
-	copied = 0;
-	offset = 2;
-
-	if (vlan_oob) {
-		/* Create a TSO vlan header template for firmware */
-
-		hwdesc = &tx_ring->desc_head[producer];
-		tx_ring->cmd_buf_arr[producer].skb = NULL;
-
-		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
-				hdr_len + VLAN_HLEN);
-
-		vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
-		skb_copy_from_linear_data(skb, vh, 12);
-		vh->h_vlan_proto = htons(ETH_P_8021Q);
-		vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
-
-		skb_copy_from_linear_data_offset(skb, 12,
-				(char *)vh + 16, copy_len - 16);
-
-		copied = copy_len - VLAN_HLEN;
-		offset = 0;
-
-		producer = get_next_index(producer, tx_ring->num_desc);
-	}
-
-	while (copied < hdr_len) {
-
-		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
-				(hdr_len - copied));
-
-		hwdesc = &tx_ring->desc_head[producer];
-		tx_ring->cmd_buf_arr[producer].skb = NULL;
-
-		skb_copy_from_linear_data_offset(skb, copied,
-				(char *)hwdesc + offset, copy_len);
-
-		copied += copy_len;
-		offset = 0;
-
-		producer = get_next_index(producer, tx_ring->num_desc);
-	}
-
-	tx_ring->producer = producer;
-	barrier();
-	adapter->stats.lso_frames++;
+	return 0;
 }
 
 static int
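The LSO path that moved into `qlcnic_tx_pkt()` copies `hdr_len` bytes of MAC/IP/TCP headers into the descriptor ring itself, chunked into descriptor-sized pieces: the first chunk starts two bytes into a descriptor, later chunks fill whole slots. A self-contained restatement of just that copy loop (hypothetical helper; 64 is assumed as `sizeof(struct cmd_desc_type0)`):

```c
#include <string.h>

#define DESC_SIZE 64	/* assumed sizeof(struct cmd_desc_type0) */

/* Spread hdr_len header bytes across consecutive ring slots,
 * starting `offset` bytes into the first slot and wrapping the
 * producer index at num_desc. Mirrors the while-loop in the patch,
 * minus the skb and cmd_buf_arr bookkeeping. */
static unsigned int copy_hdrs(char *ring, unsigned int num_desc,
			      unsigned int producer,
			      const char *hdr, int hdr_len)
{
	int copied = 0, offset = 2, copy_len;

	while (copied < hdr_len) {
		copy_len = DESC_SIZE - offset;
		if (copy_len > hdr_len - copied)
			copy_len = hdr_len - copied;

		memcpy(ring + producer * DESC_SIZE + offset,
		       hdr + copied, copy_len);

		copied += copy_len;
		offset = 0;			/* only slot 0 is offset */
		producer = (producer + 1) % num_desc;
	}
	return producer;	/* caller publishes this, then smp_mb() */
}
```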
@@ -2088,39 +2101,21 @@ out_err:
 	return -ENOMEM;
 }
 
-static int
-qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
-		struct sk_buff *skb,
-		struct cmd_desc_type0 *first_desc)
+static void
+qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+			struct qlcnic_cmd_buffer *pbuf)
 {
-	u8 opcode = 0;
-	u16 flags = 0;
-	__be16 protocol = skb->protocol;
-	struct vlan_ethhdr *vh;
+	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int i;
 
-	if (protocol == cpu_to_be16(ETH_P_8021Q)) {
-		vh = (struct vlan_ethhdr *)skb->data;
-		protocol = vh->h_vlan_encapsulated_proto;
-		flags = FLAGS_VLAN_TAGGED;
-		qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
-	} else if (vlan_tx_tag_present(skb)) {
-		flags = FLAGS_VLAN_OOB;
-		qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
+	for (i = 0; i < nr_frags; i++) {
+		nf = &pbuf->frag_array[i+1];
+		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
 	}
-	if (unlikely(adapter->pvid)) {
-		if (first_desc->vlan_TCI &&
-				!(adapter->flags & QLCNIC_TAGGING_ENABLED))
-			return -EIO;
-		if (first_desc->vlan_TCI &&
-				(adapter->flags & QLCNIC_TAGGING_ENABLED))
-			goto set_flags;
 
-		flags = FLAGS_VLAN_OOB;
-		qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
-	}
-set_flags:
-	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
-	return 0;
+	nf = &pbuf->frag_array[0];
+	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
 }
 
 static inline void
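`qlcnic_unmap_buffers()` is the inverse of `qlcnic_map_tx_skb()`, which is not part of this diff. For orientation, a plausible shape of the mapping side it unwinds — treat the body as an illustration of the `frag_array` convention (slot 0 holds the linear head, slots 1..n the page fragments), not the driver's actual code:

```c
/* Illustrative only: maps the skb head into frag_array[0] and each
 * page fragment into frag_array[i+1], the layout the new
 * qlcnic_unmap_buffers() tears down. Field names follow the 2.6.x
 * skb_frag_t of this era (page/page_offset/size). */
static int demo_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			   struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i;

	nf->dma = pci_map_single(pdev, skb->data, skb_headlen(skb),
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, nf->dma))
		return -ENOMEM;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		nf = &pbuf->frag_array[i + 1];
		nf->length = frag->size;
		nf->dma = pci_map_page(pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, nf->dma))
			return -ENOMEM;	/* real code also unwinds here */
	}
	return 0;
}
```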
@@ -2144,7 +2139,7 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	int i, k;
 
 	u32 producer;
-	int frag_count, no_of_desc;
+	int frag_count;
 	u32 num_txd = tx_ring->num_desc;
 
 	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -2161,12 +2156,8 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
 
-	/* 4 fragments per cmd des */
-	no_of_desc = (frag_count + 3) >> 2;
-
 	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
 		netif_stop_queue(netdev);
-		smp_mb();
 		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
 			netif_start_queue(netdev);
 		else {
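The surviving stop-queue logic above is the standard lockless pattern: stop first, then re-check, because a completion can free slots between the availability test and `netif_stop_queue()`. With the barriers now on the index writers, the reader-side `smp_mb()` becomes redundant. The shape of the pattern, restated (the `else` body lies outside this hunk, so it is only summarized):

```c
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
	netif_stop_queue(netdev);
	/* A completion may have freed slots in the window above;
	 * re-check so the queue is not left stopped forever. */
	if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
		netif_start_queue(netdev);
	else
		return NETDEV_TX_BUSY;	/* ring genuinely full */
}
```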
@@ -2183,9 +2174,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	first_desc = hwdesc = &tx_ring->desc_head[producer];
 	qlcnic_clear_cmddesc((u64 *)hwdesc);
 
-	if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
-		goto drop_packet;
-
 	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
 		adapter->stats.tx_dma_map_error++;
 		goto drop_packet;
@@ -2229,8 +2217,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	tx_ring->producer = get_next_index(producer, num_txd);
+	smp_mb();
 
-	qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+		goto unwind_buff;
 
 	if (qlcnic_mac_learn)
 		qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
@@ -2242,6 +2232,8 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	return NETDEV_TX_OK;
 
+unwind_buff:
+	qlcnic_unmap_buffers(pdev, skb, pbuf);
 drop_packet:
 	adapter->stats.txdropped++;
 	dev_kfree_skb_any(skb);