author     David S. Miller <davem@davemloft.net>   2012-03-17 05:02:26 -0400
committer  David S. Miller <davem@davemloft.net>   2012-03-17 05:02:26 -0400
commit     81a430ac1b88b0702c57d2513e247317e810e04d
tree       5d4b825c9b568b569833ec72cc5cbf43e1e6031c /drivers
parent     c2ec3ff6b8712f5d951927d7774c805fe3270caa
parent     729739b754affa482e92fa7836e4066096089d11

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Diffstat (limited to 'drivers')

 drivers/net/ethernet/intel/igb/e1000_defines.h   |    2
 drivers/net/ethernet/intel/igb/igb_main.c        |   37
 drivers/net/ethernet/intel/ixgbe/ixgbe.h         |   60
 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c |   23
 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c    |   17
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    | 1261
 6 files changed, 772 insertions, 628 deletions
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index aed217449f0d..89eb1f85b9fa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -134,6 +134,8 @@ | |||
134 | #define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ | 134 | #define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ |
135 | #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ | 135 | #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ |
136 | #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ | 136 | #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ |
137 | #define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ | ||
138 | #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ | ||
137 | #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ | 139 | #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ |
138 | 140 | ||
139 | /* | 141 | /* |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e96cef89f121..c4902411d749 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1769,10 +1769,21 @@ static int igb_set_features(struct net_device *netdev, | |||
1769 | netdev_features_t features) | 1769 | netdev_features_t features) |
1770 | { | 1770 | { |
1771 | netdev_features_t changed = netdev->features ^ features; | 1771 | netdev_features_t changed = netdev->features ^ features; |
1772 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
1772 | 1773 | ||
1773 | if (changed & NETIF_F_HW_VLAN_RX) | 1774 | if (changed & NETIF_F_HW_VLAN_RX) |
1774 | igb_vlan_mode(netdev, features); | 1775 | igb_vlan_mode(netdev, features); |
1775 | 1776 | ||
1777 | if (!(changed & NETIF_F_RXALL)) | ||
1778 | return 0; | ||
1779 | |||
1780 | netdev->features = features; | ||
1781 | |||
1782 | if (netif_running(netdev)) | ||
1783 | igb_reinit_locked(adapter); | ||
1784 | else | ||
1785 | igb_reset(adapter); | ||
1786 | |||
1776 | return 0; | 1787 | return 0; |
1777 | } | 1788 | } |
1778 | 1789 | ||
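The hunk above extends igb_set_features() so that toggling NETIF_F_RXALL reinitializes the interface if it is running, or resets the hardware otherwise. For reference, the general .ndo_set_features pattern (XOR the requested mask against the current one and only act on bits that actually changed) looks roughly like the sketch below; struct foo_adapter, foo_reinit() and foo_reset() are hypothetical stand-ins, not igb API.

    #include <linux/netdevice.h>

    struct foo_adapter;                            /* hypothetical driver state  */
    void foo_reinit(struct foo_adapter *adapter);  /* assumed: full down/up      */
    void foo_reset(struct foo_adapter *adapter);   /* assumed: hardware reset    */

    /* Minimal sketch of the .ndo_set_features pattern used in the hunk above. */
    static int foo_set_features(struct net_device *netdev,
                                netdev_features_t features)
    {
            netdev_features_t changed = netdev->features ^ features;
            struct foo_adapter *adapter = netdev_priv(netdev);

            /* nothing we care about changed: accept the new mask as-is */
            if (!(changed & NETIF_F_RXALL))
                    return 0;

            /* record the new mask before reprogramming the hardware */
            netdev->features = features;

            if (netif_running(netdev))
                    foo_reinit(adapter);    /* apply the new Rx config live */
            else
                    foo_reset(adapter);     /* device is down, reset suffices */

            return 0;
    }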
@@ -1954,6 +1965,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1954 | 1965 | ||
1955 | /* copy netdev features into list of user selectable features */ | 1966 | /* copy netdev features into list of user selectable features */ |
1956 | netdev->hw_features |= netdev->features; | 1967 | netdev->hw_features |= netdev->features; |
1968 | netdev->hw_features |= NETIF_F_RXALL; | ||
1957 | 1969 | ||
1958 | /* set this bit last since it cannot be part of hw_features */ | 1970 | /* set this bit last since it cannot be part of hw_features */ |
1959 | netdev->features |= NETIF_F_HW_VLAN_FILTER; | 1971 | netdev->features |= NETIF_F_HW_VLAN_FILTER; |
@@ -1964,6 +1976,8 @@ static int __devinit igb_probe(struct pci_dev *pdev, | |||
1964 | NETIF_F_IPV6_CSUM | | 1976 | NETIF_F_IPV6_CSUM | |
1965 | NETIF_F_SG; | 1977 | NETIF_F_SG; |
1966 | 1978 | ||
1979 | netdev->priv_flags |= IFF_SUPP_NOFCS; | ||
1980 | |||
1967 | if (pci_using_dac) { | 1981 | if (pci_using_dac) { |
1968 | netdev->features |= NETIF_F_HIGHDMA; | 1982 | netdev->features |= NETIF_F_HIGHDMA; |
1969 | netdev->vlan_features |= NETIF_F_HIGHDMA; | 1983 | netdev->vlan_features |= NETIF_F_HIGHDMA; |
@@ -3003,6 +3017,22 @@ void igb_setup_rctl(struct igb_adapter *adapter) | |||
3003 | wr32(E1000_QDE, ALL_QUEUES); | 3017 | wr32(E1000_QDE, ALL_QUEUES); |
3004 | } | 3018 | } |
3005 | 3019 | ||
3020 | /* This is useful for sniffing bad packets. */ | ||
3021 | if (adapter->netdev->features & NETIF_F_RXALL) { | ||
3022 | /* UPE and MPE will be handled by normal PROMISC logic | ||
3023 | * in e1000e_set_rx_mode */ | ||
3024 | rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ | ||
3025 | E1000_RCTL_BAM | /* RX All Bcast Pkts */ | ||
3026 | E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ | ||
3027 | |||
3028 | rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ | ||
3029 | E1000_RCTL_DPF | /* Allow filtered pause */ | ||
3030 | E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ | ||
3031 | /* Do not mess with E1000_CTRL_VME, it affects transmit as well, | ||
3032 | * and that breaks VLANs. | ||
3033 | */ | ||
3034 | } | ||
3035 | |||
3006 | wr32(E1000_RCTL, rctl); | 3036 | wr32(E1000_RCTL, rctl); |
3007 | } | 3037 | } |
3008 | 3038 | ||
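The RCTL programming added above is the core of the "receive all" mode: store bad packets, accept broadcast and MAC control frames, and stop filtering on VLAN. In condensed form the bit manipulation is as below; this is a sketch only, using the E1000_RCTL_* masks from e1000_defines.h with generic MMIO accessors rather than the driver's rd32/wr32 wrappers.

    #include <linux/io.h>
    /* E1000_RCTL and the E1000_RCTL_* masks come from e1000_defines.h above. */

    static void foo_enable_rx_all(u8 __iomem *regs)
    {
            u32 rctl = readl(regs + E1000_RCTL);

            rctl |=  E1000_RCTL_SBP  |      /* store bad packets (CRC/len errors) */
                     E1000_RCTL_BAM  |      /* accept all broadcast frames        */
                     E1000_RCTL_PMCF;       /* pass MAC control (pause) frames up */

            rctl &= ~(E1000_RCTL_VFE |      /* no VLAN filtering                  */
                      E1000_RCTL_DPF |      /* keep, rather than discard, pause   */
                      E1000_RCTL_CFIEN);    /* no CFI-based VLAN filtering        */

            writel(rctl, regs + E1000_RCTL);
    }

As the in-diff comment notes, E1000_CTRL_VME is deliberately left alone because it also affects the transmit path.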
@@ -4293,6 +4323,8 @@ static void igb_tx_map(struct igb_ring *tx_ring, | |||
4293 | 4323 | ||
4294 | /* write last descriptor with RS and EOP bits */ | 4324 | /* write last descriptor with RS and EOP bits */ |
4295 | cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD); | 4325 | cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD); |
4326 | if (unlikely(skb->no_fcs)) | ||
4327 | cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS)); | ||
4296 | tx_desc->read.cmd_type_len = cmd_type; | 4328 | tx_desc->read.cmd_type_len = cmd_type; |
4297 | 4329 | ||
4298 | /* set the timestamp */ | 4330 | /* set the timestamp */ |
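The skb->no_fcs test above clears the IFCS (insert FCS) bit in the advanced Tx descriptor, so the frame goes out with whatever trailing bytes the sender supplied instead of a hardware-generated CRC. Userspace drives this through the SO_NOFCS socket option that entered the networking tree around the same time; a hedged userspace sketch (error handling trimmed, and the option value is an assumption taken from asm-generic/socket.h):

    /* Ask the kernel not to append an Ethernet FCS on transmit.
     * Requires CAP_NET_RAW and driver support (IFF_SUPP_NOFCS).
     */
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_ether.h>
    #include <stdio.h>

    #ifndef SO_NOFCS
    #define SO_NOFCS 43     /* assumption: value from asm-generic/socket.h */
    #endif

    int main(void)
    {
            int one = 1;
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

            if (fd < 0 ||
                setsockopt(fd, SOL_SOCKET, SO_NOFCS, &one, sizeof(one)))
                    perror("SO_NOFCS not available");

            /* frames sent on fd now carry skb->no_fcs = 1 */
            return 0;
    }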
@@ -6098,8 +6130,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) | |||
6098 | goto next_desc; | 6130 | goto next_desc; |
6099 | } | 6131 | } |
6100 | 6132 | ||
6101 | if (igb_test_staterr(rx_desc, | 6133 | if (unlikely((igb_test_staterr(rx_desc, |
6102 | E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { | 6134 | E1000_RXDEXT_ERR_FRAME_ERR_MASK)) |
6135 | && !(rx_ring->netdev->features & NETIF_F_RXALL))) { | ||
6103 | dev_kfree_skb_any(skb); | 6136 | dev_kfree_skb_any(skb); |
6104 | goto next_desc; | 6137 | goto next_desc; |
6105 | } | 6138 | } |
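With the change above, frames flagged by E1000_RXDEXT_ERR_FRAME_ERR_MASK are dropped only when NETIF_F_RXALL is off; with the feature on they are handed to the stack untouched, which is what makes the mode useful for capturing malformed traffic. The decision reduces to a small predicate like the hypothetical helper below (it reuses the driver's own igb_test_staterr() and descriptor types from igb.h, but the helper itself is illustrative, not part of the patch).

    /* Hypothetical helper mirroring the test added above. */
    static bool foo_drop_bad_frame(struct igb_ring *rx_ring,
                                   union e1000_adv_rx_desc *rx_desc)
    {
            /* hardware reported a CRC, symbol or length error on this frame */
            bool bad = igb_test_staterr(rx_desc,
                                        E1000_RXDEXT_ERR_FRAME_ERR_MASK);

            /* keep bad frames when the user asked for everything (rx-all) */
            return bad && !(rx_ring->netdev->features & NETIF_F_RXALL);
    }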
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f069c1b10753..e0d809d0ed75 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -72,12 +72,6 @@ | |||
72 | 72 | ||
73 | /* Supported Rx Buffer Sizes */ | 73 | /* Supported Rx Buffer Sizes */ |
74 | #define IXGBE_RXBUFFER_512 512 /* Used for packet split */ | 74 | #define IXGBE_RXBUFFER_512 512 /* Used for packet split */ |
75 | #define IXGBE_RXBUFFER_2K 2048 | ||
76 | #define IXGBE_RXBUFFER_3K 3072 | ||
77 | #define IXGBE_RXBUFFER_4K 4096 | ||
78 | #define IXGBE_RXBUFFER_7K 7168 | ||
79 | #define IXGBE_RXBUFFER_8K 8192 | ||
80 | #define IXGBE_RXBUFFER_15K 15360 | ||
81 | #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ | 75 | #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ |
82 | 76 | ||
83 | /* | 77 | /* |
@@ -102,7 +96,6 @@ | |||
102 | #define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5) | 96 | #define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5) |
103 | #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) | 97 | #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) |
104 | #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) | 98 | #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) |
105 | #define IXGBE_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 8) | ||
106 | #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 | 99 | #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 |
107 | #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 | 100 | #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 |
108 | #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 | 101 | #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 |
@@ -156,19 +149,18 @@ struct vf_macvlans { | |||
156 | struct ixgbe_tx_buffer { | 149 | struct ixgbe_tx_buffer { |
157 | union ixgbe_adv_tx_desc *next_to_watch; | 150 | union ixgbe_adv_tx_desc *next_to_watch; |
158 | unsigned long time_stamp; | 151 | unsigned long time_stamp; |
159 | dma_addr_t dma; | ||
160 | u32 length; | ||
161 | u32 tx_flags; | ||
162 | struct sk_buff *skb; | 152 | struct sk_buff *skb; |
163 | u32 bytecount; | 153 | unsigned int bytecount; |
164 | u16 gso_segs; | 154 | unsigned short gso_segs; |
155 | DEFINE_DMA_UNMAP_ADDR(dma); | ||
156 | DEFINE_DMA_UNMAP_LEN(len); | ||
157 | u32 tx_flags; | ||
165 | }; | 158 | }; |
166 | 159 | ||
167 | struct ixgbe_rx_buffer { | 160 | struct ixgbe_rx_buffer { |
168 | struct sk_buff *skb; | 161 | struct sk_buff *skb; |
169 | dma_addr_t dma; | 162 | dma_addr_t dma; |
170 | struct page *page; | 163 | struct page *page; |
171 | dma_addr_t page_dma; | ||
172 | unsigned int page_offset; | 164 | unsigned int page_offset; |
173 | }; | 165 | }; |
174 | 166 | ||
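The reworked ixgbe_tx_buffer drops the open-coded dma/length fields in favour of DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN(). Those macros expand to real struct members only on configurations that need the address and length to unmap (CONFIG_NEED_DMA_MAP_STATE) and to nothing otherwise, so the accessors compile away where they are not needed. A generic sketch of the pattern, with illustrative names:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Per-buffer DMA state that costs nothing where unmap is a no-op. */
    struct foo_tx_buffer {
            struct sk_buff *skb;
            DEFINE_DMA_UNMAP_ADDR(dma);     /* dma_addr_t dma;  or nothing */
            DEFINE_DMA_UNMAP_LEN(len);      /* u32 len;         or nothing */
    };

    static int foo_map(struct device *dev, struct foo_tx_buffer *buf, u32 len)
    {
            dma_addr_t dma = dma_map_single(dev, buf->skb->data, len,
                                            DMA_TO_DEVICE);
            if (dma_mapping_error(dev, dma))
                    return -ENOMEM;

            /* stash what the unmap path will need, via the accessors */
            dma_unmap_addr_set(buf, dma, dma);
            dma_unmap_len_set(buf, len, len);
            return 0;
    }

    static void foo_unmap(struct device *dev, struct foo_tx_buffer *buf)
    {
            if (dma_unmap_len(buf, len)) {
                    dma_unmap_single(dev, dma_unmap_addr(buf, dma),
                                     dma_unmap_len(buf, len), DMA_TO_DEVICE);
                    dma_unmap_len_set(buf, len, 0);
            }
    }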
@@ -180,7 +172,6 @@ struct ixgbe_queue_stats { | |||
180 | struct ixgbe_tx_queue_stats { | 172 | struct ixgbe_tx_queue_stats { |
181 | u64 restart_queue; | 173 | u64 restart_queue; |
182 | u64 tx_busy; | 174 | u64 tx_busy; |
183 | u64 completed; | ||
184 | u64 tx_done_old; | 175 | u64 tx_done_old; |
185 | }; | 176 | }; |
186 | 177 | ||
@@ -193,21 +184,15 @@ struct ixgbe_rx_queue_stats { | |||
193 | u64 csum_err; | 184 | u64 csum_err; |
194 | }; | 185 | }; |
195 | 186 | ||
196 | enum ixbge_ring_state_t { | 187 | enum ixgbe_ring_state_t { |
197 | __IXGBE_TX_FDIR_INIT_DONE, | 188 | __IXGBE_TX_FDIR_INIT_DONE, |
198 | __IXGBE_TX_DETECT_HANG, | 189 | __IXGBE_TX_DETECT_HANG, |
199 | __IXGBE_HANG_CHECK_ARMED, | 190 | __IXGBE_HANG_CHECK_ARMED, |
200 | __IXGBE_RX_PS_ENABLED, | ||
201 | __IXGBE_RX_RSC_ENABLED, | 191 | __IXGBE_RX_RSC_ENABLED, |
202 | __IXGBE_RX_CSUM_UDP_ZERO_ERR, | 192 | __IXGBE_RX_CSUM_UDP_ZERO_ERR, |
193 | __IXGBE_RX_FCOE_BUFSZ, | ||
203 | }; | 194 | }; |
204 | 195 | ||
205 | #define ring_is_ps_enabled(ring) \ | ||
206 | test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) | ||
207 | #define set_ring_ps_enabled(ring) \ | ||
208 | set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) | ||
209 | #define clear_ring_ps_enabled(ring) \ | ||
210 | clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state) | ||
211 | #define check_for_tx_hang(ring) \ | 196 | #define check_for_tx_hang(ring) \ |
212 | test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) | 197 | test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) |
213 | #define set_check_for_tx_hang(ring) \ | 198 | #define set_check_for_tx_hang(ring) \ |
@@ -233,7 +218,6 @@ struct ixgbe_ring { | |||
233 | u8 __iomem *tail; | 218 | u8 __iomem *tail; |
234 | 219 | ||
235 | u16 count; /* amount of descriptors */ | 220 | u16 count; /* amount of descriptors */ |
236 | u16 rx_buf_len; | ||
237 | 221 | ||
238 | u8 queue_index; /* needed for multiqueue queue management */ | 222 | u8 queue_index; /* needed for multiqueue queue management */ |
239 | u8 reg_idx; /* holds the special value that gets | 223 | u8 reg_idx; /* holds the special value that gets |
@@ -241,8 +225,13 @@ struct ixgbe_ring { | |||
241 | * associated with this ring, which is | 225 | * associated with this ring, which is |
242 | * different for DCB and RSS modes | 226 | * different for DCB and RSS modes |
243 | */ | 227 | */ |
244 | u8 atr_sample_rate; | 228 | union { |
245 | u8 atr_count; | 229 | struct { |
230 | u8 atr_sample_rate; | ||
231 | u8 atr_count; | ||
232 | }; | ||
233 | u16 next_to_alloc; | ||
234 | }; | ||
246 | 235 | ||
247 | u16 next_to_use; | 236 | u16 next_to_use; |
248 | u16 next_to_clean; | 237 | u16 next_to_clean; |
@@ -287,6 +276,22 @@ struct ixgbe_ring_feature { | |||
287 | int mask; | 276 | int mask; |
288 | } ____cacheline_internodealigned_in_smp; | 277 | } ____cacheline_internodealigned_in_smp; |
289 | 278 | ||
279 | /* | ||
280 | * FCoE requires that all Rx buffers be over 2200 bytes in length. Since | ||
281 | * this is twice the size of a half page we need to double the page order | ||
282 | * for FCoE enabled Rx queues. | ||
283 | */ | ||
284 | #if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) | ||
285 | static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) | ||
286 | { | ||
287 | return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0; | ||
288 | } | ||
289 | #else | ||
290 | #define ixgbe_rx_pg_order(_ring) 0 | ||
291 | #endif | ||
292 | #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) | ||
293 | #define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring)) | ||
294 | |||
290 | struct ixgbe_ring_container { | 295 | struct ixgbe_ring_container { |
291 | struct ixgbe_ring *ring; /* pointer to linked list of rings */ | 296 | struct ixgbe_ring *ring; /* pointer to linked list of rings */ |
292 | unsigned int total_bytes; /* total bytes processed this int */ | 297 | unsigned int total_bytes; /* total bytes processed this int */ |
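The new ixgbe_rx_pg_order()/ixgbe_rx_bufsz() helpers encode the buffer geometry for the page-based Rx path: each Rx buffer is half of a (possibly compound) page, and FCoE rings on 4K-page systems bump the order to 1 so that half is 4096 bytes and comfortably holds the >2200-byte FCoE frames mentioned in the comment. A standalone check of the arithmetic for PAGE_SIZE = 4096 (values only, not driver code):

    #include <assert.h>

    #define PAGE_SIZE 4096u

    static unsigned int rx_pg_order(int fcoe) { return fcoe ? 1 : 0; }
    static unsigned int rx_pg_size(int fcoe)  { return PAGE_SIZE << rx_pg_order(fcoe); }
    static unsigned int rx_bufsz(int fcoe)    { return (PAGE_SIZE / 2) << rx_pg_order(fcoe); }

    int main(void)
    {
            assert(rx_pg_size(0) == 4096 && rx_bufsz(0) == 2048); /* normal ring */
            assert(rx_pg_size(1) == 8192 && rx_bufsz(1) == 4096); /* FCoE ring   */
            /* 4096 > 2200, so one half-buffer fits a maximum FCoE frame */
            return 0;
    }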
@@ -554,7 +559,7 @@ struct ixgbe_cb { | |||
554 | }; | 559 | }; |
555 | dma_addr_t dma; | 560 | dma_addr_t dma; |
556 | u16 append_cnt; | 561 | u16 append_cnt; |
557 | bool delay_unmap; | 562 | bool page_released; |
558 | }; | 563 | }; |
559 | #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) | 564 | #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) |
560 | 565 | ||
@@ -625,7 +630,8 @@ extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); | |||
625 | extern void ixgbe_do_reset(struct net_device *netdev); | 630 | extern void ixgbe_do_reset(struct net_device *netdev); |
626 | #ifdef IXGBE_FCOE | 631 | #ifdef IXGBE_FCOE |
627 | extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); | 632 | extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); |
628 | extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, | 633 | extern int ixgbe_fso(struct ixgbe_ring *tx_ring, |
634 | struct ixgbe_tx_buffer *first, | ||
629 | u32 tx_flags, u8 *hdr_len); | 635 | u32 tx_flags, u8 *hdr_len); |
630 | extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); | 636 | extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); |
631 | extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, | 637 | extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, |
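The renamed page_released flag lives in struct ixgbe_cb, which the driver overlays on the 48-byte skb->cb[] scratch area through the IXGBE_CB() macro shown in context above. The pattern is common in drivers that need per-packet state to travel with the skb; a generic sketch, with the usual BUILD_BUG_ON guard so the struct can never outgrow cb[]:

    #include <linux/skbuff.h>
    #include <linux/bug.h>

    /* Illustrative private control block carried inside skb->cb[]. */
    struct foo_cb {
            dma_addr_t dma;         /* mapping to release when skb is consumed */
            u16 append_cnt;         /* e.g. RSC segments merged into this skb  */
            bool page_released;     /* page already unmapped from the ring?    */
    };
    #define FOO_CB(skb)     ((struct foo_cb *)(skb)->cb)

    static void foo_init_cb(struct sk_buff *skb, dma_addr_t dma)
    {
            /* skb->cb is 48 bytes; refuse to build if we ever outgrow it */
            BUILD_BUG_ON(sizeof(struct foo_cb) > sizeof(skb->cb));

            FOO_CB(skb)->dma = dma;
            FOO_CB(skb)->append_cnt = 0;
            FOO_CB(skb)->page_released = false;
    }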
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 24f7291dbe57..b09e67cc9d6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/netdevice.h> | 35 | #include <linux/netdevice.h> |
36 | #include <linux/ethtool.h> | 36 | #include <linux/ethtool.h> |
37 | #include <linux/vmalloc.h> | 37 | #include <linux/vmalloc.h> |
38 | #include <linux/highmem.h> | ||
38 | #include <linux/uaccess.h> | 39 | #include <linux/uaccess.h> |
39 | 40 | ||
40 | #include "ixgbe.h" | 41 | #include "ixgbe.h" |
@@ -1615,7 +1616,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) | |||
1615 | rx_ring->dev = &adapter->pdev->dev; | 1616 | rx_ring->dev = &adapter->pdev->dev; |
1616 | rx_ring->netdev = adapter->netdev; | 1617 | rx_ring->netdev = adapter->netdev; |
1617 | rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; | 1618 | rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; |
1618 | rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K; | ||
1619 | 1619 | ||
1620 | err = ixgbe_setup_rx_resources(rx_ring); | 1620 | err = ixgbe_setup_rx_resources(rx_ring); |
1621 | if (err) { | 1621 | if (err) { |
@@ -1718,13 +1718,15 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, | |||
1718 | 1718 | ||
1719 | frame_size >>= 1; | 1719 | frame_size >>= 1; |
1720 | 1720 | ||
1721 | data = rx_buffer->skb->data; | 1721 | data = kmap(rx_buffer->page) + rx_buffer->page_offset; |
1722 | 1722 | ||
1723 | if (data[3] != 0xFF || | 1723 | if (data[3] != 0xFF || |
1724 | data[frame_size + 10] != 0xBE || | 1724 | data[frame_size + 10] != 0xBE || |
1725 | data[frame_size + 12] != 0xAF) | 1725 | data[frame_size + 12] != 0xAF) |
1726 | match = false; | 1726 | match = false; |
1727 | 1727 | ||
1728 | kunmap(rx_buffer->page); | ||
1729 | |||
1728 | return match; | 1730 | return match; |
1729 | } | 1731 | } |
1730 | 1732 | ||
@@ -1746,17 +1748,22 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, | |||
1746 | /* check Rx buffer */ | 1748 | /* check Rx buffer */ |
1747 | rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; | 1749 | rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; |
1748 | 1750 | ||
1749 | /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ | 1751 | /* sync Rx buffer for CPU read */ |
1750 | dma_unmap_single(rx_ring->dev, | 1752 | dma_sync_single_for_cpu(rx_ring->dev, |
1751 | rx_buffer->dma, | 1753 | rx_buffer->dma, |
1752 | rx_ring->rx_buf_len, | 1754 | ixgbe_rx_bufsz(rx_ring), |
1753 | DMA_FROM_DEVICE); | 1755 | DMA_FROM_DEVICE); |
1754 | rx_buffer->dma = 0; | ||
1755 | 1756 | ||
1756 | /* verify contents of skb */ | 1757 | /* verify contents of skb */ |
1757 | if (ixgbe_check_lbtest_frame(rx_buffer, size)) | 1758 | if (ixgbe_check_lbtest_frame(rx_buffer, size)) |
1758 | count++; | 1759 | count++; |
1759 | 1760 | ||
1761 | /* sync Rx buffer for device write */ | ||
1762 | dma_sync_single_for_device(rx_ring->dev, | ||
1763 | rx_buffer->dma, | ||
1764 | ixgbe_rx_bufsz(rx_ring), | ||
1765 | DMA_FROM_DEVICE); | ||
1766 | |||
1760 | /* unmap buffer on Tx side */ | 1767 | /* unmap buffer on Tx side */ |
1761 | tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; | 1768 | tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; |
1762 | ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); | 1769 | ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); |
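Because the loopback test now inspects a buffer whose DMA mapping stays live (the page-reuse model never unmaps per packet), it brackets the CPU access with dma_sync_single_for_cpu()/dma_sync_single_for_device() instead of unmapping and remapping. The general shape of that pattern, as a sketch with illustrative parameters:

    #include <linux/dma-mapping.h>

    /* Peek at a DMA buffer the device still owns the mapping for.
     * dev/dma/vaddr/len come from wherever the ring stored them at map time.
     */
    static bool foo_check_rx_buffer(struct device *dev, dma_addr_t dma,
                                    void *vaddr, size_t len)
    {
            bool ok;

            /* give the CPU a coherent view of what the device wrote */
            dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);

            ok = ((u8 *)vaddr)[3] == 0xFF;  /* whatever check is needed */

            /* hand the buffer back to the device before it is reused */
            dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

            return ok;
    }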
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index da7da752b6b4..5f943d3f85c4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -447,7 +447,7 @@ ddp_out: | |||
447 | /** | 447 | /** |
448 | * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) | 448 | * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO) |
449 | * @tx_ring: tx desc ring | 449 | * @tx_ring: tx desc ring |
450 | * @skb: associated skb | 450 | * @first: first tx_buffer structure containing skb, tx_flags, and protocol |
451 | * @tx_flags: tx flags | 451 | * @tx_flags: tx flags |
452 | * @hdr_len: hdr_len to be returned | 452 | * @hdr_len: hdr_len to be returned |
453 | * | 453 | * |
@@ -455,9 +455,11 @@ ddp_out: | |||
455 | * | 455 | * |
456 | * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error | 456 | * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error |
457 | */ | 457 | */ |
458 | int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, | 458 | int ixgbe_fso(struct ixgbe_ring *tx_ring, |
459 | struct ixgbe_tx_buffer *first, | ||
459 | u32 tx_flags, u8 *hdr_len) | 460 | u32 tx_flags, u8 *hdr_len) |
460 | { | 461 | { |
462 | struct sk_buff *skb = first->skb; | ||
461 | struct fc_frame_header *fh; | 463 | struct fc_frame_header *fh; |
462 | u32 vlan_macip_lens; | 464 | u32 vlan_macip_lens; |
463 | u32 fcoe_sof_eof = 0; | 465 | u32 fcoe_sof_eof = 0; |
@@ -530,9 +532,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, | |||
530 | *hdr_len = sizeof(struct fcoe_crc_eof); | 532 | *hdr_len = sizeof(struct fcoe_crc_eof); |
531 | 533 | ||
532 | /* hdr_len includes fc_hdr if FCoE LSO is enabled */ | 534 | /* hdr_len includes fc_hdr if FCoE LSO is enabled */ |
533 | if (skb_is_gso(skb)) | 535 | if (skb_is_gso(skb)) { |
534 | *hdr_len += (skb_transport_offset(skb) + | 536 | *hdr_len += skb_transport_offset(skb) + |
535 | sizeof(struct fc_frame_header)); | 537 | sizeof(struct fc_frame_header); |
538 | /* update gso_segs and bytecount */ | ||
539 | first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len, | ||
540 | skb_shinfo(skb)->gso_size); | ||
541 | first->bytecount += (first->gso_segs - 1) * *hdr_len; | ||
542 | } | ||
536 | 543 | ||
537 | /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ | 544 | /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */ |
538 | mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; | 545 | mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; |
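The arithmetic added to ixgbe_fso() keeps the Tx byte statistics honest for FCoE sequence offload: the hardware replays hdr_len bytes of headers in front of every segment after the first, so the bytes that actually hit the wire exceed skb->len by (gso_segs - 1) * hdr_len. A standalone illustration of the same computation, with example values chosen for the sketch:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int skb_len  = 68608;  /* example FCoE LSO payload + headers */
            unsigned int hdr_len  = 62;     /* example eth + FCoE + FC hdr + CRC/EOF */
            unsigned int gso_size = 2048;   /* FC payload carried per segment */

            unsigned int gso_segs  = DIV_ROUND_UP(skb_len - hdr_len, gso_size);
            unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

            /* each of the 34 segments carries its own copy of the headers */
            printf("segs=%u bytes_on_wire=%u\n", gso_segs, bytecount);
            return 0;
    }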
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 167e898fbba6..1d8f9f83f8ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -289,7 +289,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
289 | struct ixgbe_reg_info *reginfo; | 289 | struct ixgbe_reg_info *reginfo; |
290 | int n = 0; | 290 | int n = 0; |
291 | struct ixgbe_ring *tx_ring; | 291 | struct ixgbe_ring *tx_ring; |
292 | struct ixgbe_tx_buffer *tx_buffer_info; | 292 | struct ixgbe_tx_buffer *tx_buffer; |
293 | union ixgbe_adv_tx_desc *tx_desc; | 293 | union ixgbe_adv_tx_desc *tx_desc; |
294 | struct my_u0 { u64 a; u64 b; } *u0; | 294 | struct my_u0 { u64 a; u64 b; } *u0; |
295 | struct ixgbe_ring *rx_ring; | 295 | struct ixgbe_ring *rx_ring; |
@@ -329,14 +329,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
329 | pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); | 329 | pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); |
330 | for (n = 0; n < adapter->num_tx_queues; n++) { | 330 | for (n = 0; n < adapter->num_tx_queues; n++) { |
331 | tx_ring = adapter->tx_ring[n]; | 331 | tx_ring = adapter->tx_ring[n]; |
332 | tx_buffer_info = | 332 | tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; |
333 | &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; | ||
334 | pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", | 333 | pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", |
335 | n, tx_ring->next_to_use, tx_ring->next_to_clean, | 334 | n, tx_ring->next_to_use, tx_ring->next_to_clean, |
336 | (u64)tx_buffer_info->dma, | 335 | (u64)dma_unmap_addr(tx_buffer, dma), |
337 | tx_buffer_info->length, | 336 | dma_unmap_len(tx_buffer, len), |
338 | tx_buffer_info->next_to_watch, | 337 | tx_buffer->next_to_watch, |
339 | (u64)tx_buffer_info->time_stamp); | 338 | (u64)tx_buffer->time_stamp); |
340 | } | 339 | } |
341 | 340 | ||
342 | /* Print TX Rings */ | 341 | /* Print TX Rings */ |
@@ -367,17 +366,17 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
367 | 366 | ||
368 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { | 367 | for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { |
369 | tx_desc = IXGBE_TX_DESC(tx_ring, i); | 368 | tx_desc = IXGBE_TX_DESC(tx_ring, i); |
370 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 369 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
371 | u0 = (struct my_u0 *)tx_desc; | 370 | u0 = (struct my_u0 *)tx_desc; |
372 | pr_info("T [0x%03X] %016llX %016llX %016llX" | 371 | pr_info("T [0x%03X] %016llX %016llX %016llX" |
373 | " %04X %p %016llX %p", i, | 372 | " %04X %p %016llX %p", i, |
374 | le64_to_cpu(u0->a), | 373 | le64_to_cpu(u0->a), |
375 | le64_to_cpu(u0->b), | 374 | le64_to_cpu(u0->b), |
376 | (u64)tx_buffer_info->dma, | 375 | (u64)dma_unmap_addr(tx_buffer, dma), |
377 | tx_buffer_info->length, | 376 | dma_unmap_len(tx_buffer, len), |
378 | tx_buffer_info->next_to_watch, | 377 | tx_buffer->next_to_watch, |
379 | (u64)tx_buffer_info->time_stamp, | 378 | (u64)tx_buffer->time_stamp, |
380 | tx_buffer_info->skb); | 379 | tx_buffer->skb); |
381 | if (i == tx_ring->next_to_use && | 380 | if (i == tx_ring->next_to_use && |
382 | i == tx_ring->next_to_clean) | 381 | i == tx_ring->next_to_clean) |
383 | pr_cont(" NTC/U\n"); | 382 | pr_cont(" NTC/U\n"); |
@@ -389,11 +388,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) | |||
389 | pr_cont("\n"); | 388 | pr_cont("\n"); |
390 | 389 | ||
391 | if (netif_msg_pktdata(adapter) && | 390 | if (netif_msg_pktdata(adapter) && |
392 | tx_buffer_info->dma != 0) | 391 | dma_unmap_len(tx_buffer, len) != 0) |
393 | print_hex_dump(KERN_INFO, "", | 392 | print_hex_dump(KERN_INFO, "", |
394 | DUMP_PREFIX_ADDRESS, 16, 1, | 393 | DUMP_PREFIX_ADDRESS, 16, 1, |
395 | phys_to_virt(tx_buffer_info->dma), | 394 | phys_to_virt(dma_unmap_addr(tx_buffer, |
396 | tx_buffer_info->length, true); | 395 | dma)), |
396 | dma_unmap_len(tx_buffer, len), | ||
397 | true); | ||
397 | } | 398 | } |
398 | } | 399 | } |
399 | 400 | ||
@@ -469,17 +470,7 @@ rx_ring_summary: | |||
469 | print_hex_dump(KERN_INFO, "", | 470 | print_hex_dump(KERN_INFO, "", |
470 | DUMP_PREFIX_ADDRESS, 16, 1, | 471 | DUMP_PREFIX_ADDRESS, 16, 1, |
471 | phys_to_virt(rx_buffer_info->dma), | 472 | phys_to_virt(rx_buffer_info->dma), |
472 | rx_ring->rx_buf_len, true); | 473 | ixgbe_rx_bufsz(rx_ring), true); |
473 | |||
474 | if (rx_ring->rx_buf_len | ||
475 | < IXGBE_RXBUFFER_2K) | ||
476 | print_hex_dump(KERN_INFO, "", | ||
477 | DUMP_PREFIX_ADDRESS, 16, 1, | ||
478 | phys_to_virt( | ||
479 | rx_buffer_info->page_dma + | ||
480 | rx_buffer_info->page_offset | ||
481 | ), | ||
482 | PAGE_SIZE/2, true); | ||
483 | } | 474 | } |
484 | } | 475 | } |
485 | 476 | ||
@@ -589,32 +580,26 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, | |||
589 | } | 580 | } |
590 | } | 581 | } |
591 | 582 | ||
592 | static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring, | 583 | void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, |
593 | struct ixgbe_tx_buffer *tx_buffer) | 584 | struct ixgbe_tx_buffer *tx_buffer) |
594 | { | 585 | { |
595 | if (tx_buffer->dma) { | 586 | if (tx_buffer->skb) { |
596 | if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE) | 587 | dev_kfree_skb_any(tx_buffer->skb); |
597 | dma_unmap_page(ring->dev, | 588 | if (dma_unmap_len(tx_buffer, len)) |
598 | tx_buffer->dma, | ||
599 | tx_buffer->length, | ||
600 | DMA_TO_DEVICE); | ||
601 | else | ||
602 | dma_unmap_single(ring->dev, | 589 | dma_unmap_single(ring->dev, |
603 | tx_buffer->dma, | 590 | dma_unmap_addr(tx_buffer, dma), |
604 | tx_buffer->length, | 591 | dma_unmap_len(tx_buffer, len), |
605 | DMA_TO_DEVICE); | 592 | DMA_TO_DEVICE); |
593 | } else if (dma_unmap_len(tx_buffer, len)) { | ||
594 | dma_unmap_page(ring->dev, | ||
595 | dma_unmap_addr(tx_buffer, dma), | ||
596 | dma_unmap_len(tx_buffer, len), | ||
597 | DMA_TO_DEVICE); | ||
606 | } | 598 | } |
607 | tx_buffer->dma = 0; | 599 | tx_buffer->next_to_watch = NULL; |
608 | } | 600 | tx_buffer->skb = NULL; |
609 | 601 | dma_unmap_len_set(tx_buffer, len, 0); | |
610 | void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, | 602 | /* tx_buffer must be completely set up in the transmit path */ |
611 | struct ixgbe_tx_buffer *tx_buffer_info) | ||
612 | { | ||
613 | ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info); | ||
614 | if (tx_buffer_info->skb) | ||
615 | dev_kfree_skb_any(tx_buffer_info->skb); | ||
616 | tx_buffer_info->skb = NULL; | ||
617 | /* tx_buffer_info must be completely set up in the transmit path */ | ||
618 | } | 603 | } |
619 | 604 | ||
620 | static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) | 605 | static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) |
@@ -671,7 +656,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) | |||
671 | 656 | ||
672 | static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) | 657 | static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) |
673 | { | 658 | { |
674 | return ring->tx_stats.completed; | 659 | return ring->stats.packets; |
675 | } | 660 | } |
676 | 661 | ||
677 | static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) | 662 | static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) |
@@ -751,12 +736,16 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
751 | union ixgbe_adv_tx_desc *tx_desc; | 736 | union ixgbe_adv_tx_desc *tx_desc; |
752 | unsigned int total_bytes = 0, total_packets = 0; | 737 | unsigned int total_bytes = 0, total_packets = 0; |
753 | unsigned int budget = q_vector->tx.work_limit; | 738 | unsigned int budget = q_vector->tx.work_limit; |
754 | u16 i = tx_ring->next_to_clean; | 739 | unsigned int i = tx_ring->next_to_clean; |
740 | |||
741 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | ||
742 | return true; | ||
755 | 743 | ||
756 | tx_buffer = &tx_ring->tx_buffer_info[i]; | 744 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
757 | tx_desc = IXGBE_TX_DESC(tx_ring, i); | 745 | tx_desc = IXGBE_TX_DESC(tx_ring, i); |
746 | i -= tx_ring->count; | ||
758 | 747 | ||
759 | for (; budget; budget--) { | 748 | do { |
760 | union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; | 749 | union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; |
761 | 750 | ||
762 | /* if next_to_watch is not set then there is no work pending */ | 751 | /* if next_to_watch is not set then there is no work pending */ |
@@ -770,36 +759,65 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
770 | if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) | 759 | if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) |
771 | break; | 760 | break; |
772 | 761 | ||
773 | /* count the packet as being completed */ | ||
774 | tx_ring->tx_stats.completed++; | ||
775 | |||
776 | /* clear next_to_watch to prevent false hangs */ | 762 | /* clear next_to_watch to prevent false hangs */ |
777 | tx_buffer->next_to_watch = NULL; | 763 | tx_buffer->next_to_watch = NULL; |
778 | 764 | ||
779 | do { | 765 | /* update the statistics for this packet */ |
780 | ixgbe_unmap_tx_resource(tx_ring, tx_buffer); | 766 | total_bytes += tx_buffer->bytecount; |
781 | if (likely(tx_desc == eop_desc)) { | 767 | total_packets += tx_buffer->gso_segs; |
782 | eop_desc = NULL; | ||
783 | dev_kfree_skb_any(tx_buffer->skb); | ||
784 | tx_buffer->skb = NULL; | ||
785 | 768 | ||
786 | total_bytes += tx_buffer->bytecount; | 769 | /* free the skb */ |
787 | total_packets += tx_buffer->gso_segs; | 770 | dev_kfree_skb_any(tx_buffer->skb); |
788 | } | 771 | |
772 | /* unmap skb header data */ | ||
773 | dma_unmap_single(tx_ring->dev, | ||
774 | dma_unmap_addr(tx_buffer, dma), | ||
775 | dma_unmap_len(tx_buffer, len), | ||
776 | DMA_TO_DEVICE); | ||
789 | 777 | ||
778 | /* clear tx_buffer data */ | ||
779 | tx_buffer->skb = NULL; | ||
780 | dma_unmap_len_set(tx_buffer, len, 0); | ||
781 | |||
782 | /* unmap remaining buffers */ | ||
783 | while (tx_desc != eop_desc) { | ||
790 | tx_buffer++; | 784 | tx_buffer++; |
791 | tx_desc++; | 785 | tx_desc++; |
792 | i++; | 786 | i++; |
793 | if (unlikely(i == tx_ring->count)) { | 787 | if (unlikely(!i)) { |
794 | i = 0; | 788 | i -= tx_ring->count; |
795 | |||
796 | tx_buffer = tx_ring->tx_buffer_info; | 789 | tx_buffer = tx_ring->tx_buffer_info; |
797 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); | 790 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); |
798 | } | 791 | } |
799 | 792 | ||
800 | } while (eop_desc); | 793 | /* unmap any remaining paged data */ |
801 | } | 794 | if (dma_unmap_len(tx_buffer, len)) { |
795 | dma_unmap_page(tx_ring->dev, | ||
796 | dma_unmap_addr(tx_buffer, dma), | ||
797 | dma_unmap_len(tx_buffer, len), | ||
798 | DMA_TO_DEVICE); | ||
799 | dma_unmap_len_set(tx_buffer, len, 0); | ||
800 | } | ||
801 | } | ||
802 | |||
803 | /* move us one more past the eop_desc for start of next pkt */ | ||
804 | tx_buffer++; | ||
805 | tx_desc++; | ||
806 | i++; | ||
807 | if (unlikely(!i)) { | ||
808 | i -= tx_ring->count; | ||
809 | tx_buffer = tx_ring->tx_buffer_info; | ||
810 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); | ||
811 | } | ||
802 | 812 | ||
813 | /* issue prefetch for next Tx descriptor */ | ||
814 | prefetch(tx_desc); | ||
815 | |||
816 | /* update budget accounting */ | ||
817 | budget--; | ||
818 | } while (likely(budget)); | ||
819 | |||
820 | i += tx_ring->count; | ||
803 | tx_ring->next_to_clean = i; | 821 | tx_ring->next_to_clean = i; |
804 | u64_stats_update_begin(&tx_ring->syncp); | 822 | u64_stats_update_begin(&tx_ring->syncp); |
805 | tx_ring->stats.bytes += total_bytes; | 823 | tx_ring->stats.bytes += total_bytes; |
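The rewritten cleanup loop biases the ring index negative (i -= tx_ring->count) so the hot path can detect wrap with a cheap "if (!i) i -= count" instead of comparing against the ring size on every step, then converts back with i += count before storing next_to_clean. The unsigned wraparound is what makes this well defined. A stripped-down sketch of just that indexing trick, with hypothetical types:

    /* Sketch of the "negative index" ring walk used in ixgbe_clean_tx_irq(). */
    struct foo_ring { unsigned int count; unsigned int next_to_clean; };

    static void foo_walk(struct foo_ring *ring, unsigned int todo)
    {
            unsigned int i = ring->next_to_clean;

            i -= ring->count;       /* bias the index; relies on unsigned wrap */

            while (todo--) {
                    /* ... process ring entry (i + ring->count) here ... */

                    i++;
                    if (!i)         /* walked off the end of the ring */
                            i -= ring->count;
            }

            i += ring->count;       /* undo the bias before storing */
            ring->next_to_clean = i;
    }

The payoff is one fewer compare-and-branch per descriptor in a loop that runs for every transmitted packet.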
@@ -811,7 +829,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
811 | if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { | 829 | if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) { |
812 | /* schedule immediate reset if we believe we hung */ | 830 | /* schedule immediate reset if we believe we hung */ |
813 | struct ixgbe_hw *hw = &adapter->hw; | 831 | struct ixgbe_hw *hw = &adapter->hw; |
814 | tx_desc = IXGBE_TX_DESC(tx_ring, i); | ||
815 | e_err(drv, "Detected Tx Unit Hang\n" | 832 | e_err(drv, "Detected Tx Unit Hang\n" |
816 | " Tx Queue <%d>\n" | 833 | " Tx Queue <%d>\n" |
817 | " TDH, TDT <%x>, <%x>\n" | 834 | " TDH, TDT <%x>, <%x>\n" |
@@ -849,9 +866,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, | |||
849 | * sees the new next_to_clean. | 866 | * sees the new next_to_clean. |
850 | */ | 867 | */ |
851 | smp_mb(); | 868 | smp_mb(); |
852 | if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && | 869 | if (__netif_subqueue_stopped(tx_ring->netdev, |
853 | !test_bit(__IXGBE_DOWN, &adapter->state)) { | 870 | tx_ring->queue_index) |
854 | netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); | 871 | && !test_bit(__IXGBE_DOWN, &adapter->state)) { |
872 | netif_wake_subqueue(tx_ring->netdev, | ||
873 | tx_ring->queue_index); | ||
855 | ++tx_ring->tx_stats.restart_queue; | 874 | ++tx_ring->tx_stats.restart_queue; |
856 | } | 875 | } |
857 | } | 876 | } |
@@ -1006,6 +1025,7 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, | |||
1006 | skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); | 1025 | skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); |
1007 | } | 1026 | } |
1008 | 1027 | ||
1028 | #ifdef IXGBE_FCOE | ||
1009 | /** | 1029 | /** |
1010 | * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type | 1030 | * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type |
1011 | * @adapter: address of board private structure | 1031 | * @adapter: address of board private structure |
@@ -1024,6 +1044,7 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter, | |||
1024 | IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); | 1044 | IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); |
1025 | } | 1045 | } |
1026 | 1046 | ||
1047 | #endif /* IXGBE_FCOE */ | ||
1027 | /** | 1048 | /** |
1028 | * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum | 1049 | * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum |
1029 | * @ring: structure containing ring specific data | 1050 | * @ring: structure containing ring specific data |
@@ -1051,7 +1072,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, | |||
1051 | return; | 1072 | return; |
1052 | 1073 | ||
1053 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { | 1074 | if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { |
1054 | u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; | 1075 | __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; |
1055 | 1076 | ||
1056 | /* | 1077 | /* |
1057 | * 82599 errata, UDP frames with a 0 checksum can be marked as | 1078 | * 82599 errata, UDP frames with a 0 checksum can be marked as |
@@ -1072,6 +1093,9 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, | |||
1072 | static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) | 1093 | static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) |
1073 | { | 1094 | { |
1074 | rx_ring->next_to_use = val; | 1095 | rx_ring->next_to_use = val; |
1096 | |||
1097 | /* update next to alloc since we have filled the ring */ | ||
1098 | rx_ring->next_to_alloc = val; | ||
1075 | /* | 1099 | /* |
1076 | * Force memory writes to complete before letting h/w | 1100 | * Force memory writes to complete before letting h/w |
1077 | * know there are new descriptors to fetch. (Only | 1101 | * know there are new descriptors to fetch. (Only |
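ixgbe_release_rx_desc() in the context above also shows the standard ordering rule for handing descriptors to the NIC: all descriptor writes must be globally visible before the tail-register write that tells the hardware to fetch them, hence the wmb() ahead of writel(). In sketch form, with an illustrative ring type:

    #include <linux/io.h>

    struct foo_rx_ring {
            u32 next_to_use;
            u8 __iomem *tail;       /* ioremapped tail register */
    };

    /* Publish newly filled descriptors to the device. */
    static void foo_release_rx_desc(struct foo_rx_ring *ring, u32 val)
    {
            ring->next_to_use = val;

            /* descriptor memory writes must complete before the doorbell,
             * or the NIC may fetch stale descriptor contents
             */
            wmb();
            writel(val, ring->tail);
    }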
@@ -1082,67 +1106,46 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) | |||
1082 | writel(val, rx_ring->tail); | 1106 | writel(val, rx_ring->tail); |
1083 | } | 1107 | } |
1084 | 1108 | ||
1085 | static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring, | ||
1086 | struct ixgbe_rx_buffer *bi) | ||
1087 | { | ||
1088 | struct sk_buff *skb = bi->skb; | ||
1089 | dma_addr_t dma = bi->dma; | ||
1090 | |||
1091 | if (dma) | ||
1092 | return true; | ||
1093 | |||
1094 | if (likely(!skb)) { | ||
1095 | skb = netdev_alloc_skb_ip_align(rx_ring->netdev, | ||
1096 | rx_ring->rx_buf_len); | ||
1097 | bi->skb = skb; | ||
1098 | if (!skb) { | ||
1099 | rx_ring->rx_stats.alloc_rx_buff_failed++; | ||
1100 | return false; | ||
1101 | } | ||
1102 | } | ||
1103 | |||
1104 | dma = dma_map_single(rx_ring->dev, skb->data, | ||
1105 | rx_ring->rx_buf_len, DMA_FROM_DEVICE); | ||
1106 | |||
1107 | if (dma_mapping_error(rx_ring->dev, dma)) { | ||
1108 | rx_ring->rx_stats.alloc_rx_buff_failed++; | ||
1109 | return false; | ||
1110 | } | ||
1111 | |||
1112 | bi->dma = dma; | ||
1113 | return true; | ||
1114 | } | ||
1115 | |||
1116 | static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, | 1109 | static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, |
1117 | struct ixgbe_rx_buffer *bi) | 1110 | struct ixgbe_rx_buffer *bi) |
1118 | { | 1111 | { |
1119 | struct page *page = bi->page; | 1112 | struct page *page = bi->page; |
1120 | dma_addr_t page_dma = bi->page_dma; | 1113 | dma_addr_t dma = bi->dma; |
1121 | unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2); | ||
1122 | 1114 | ||
1123 | if (page_dma) | 1115 | /* since we are recycling buffers we should seldom need to alloc */ |
1116 | if (likely(dma)) | ||
1124 | return true; | 1117 | return true; |
1125 | 1118 | ||
1126 | if (!page) { | 1119 | /* alloc new page for storage */ |
1127 | page = alloc_page(GFP_ATOMIC | __GFP_COLD); | 1120 | if (likely(!page)) { |
1128 | bi->page = page; | 1121 | page = alloc_pages(GFP_ATOMIC | __GFP_COLD, |
1122 | ixgbe_rx_pg_order(rx_ring)); | ||
1129 | if (unlikely(!page)) { | 1123 | if (unlikely(!page)) { |
1130 | rx_ring->rx_stats.alloc_rx_page_failed++; | 1124 | rx_ring->rx_stats.alloc_rx_page_failed++; |
1131 | return false; | 1125 | return false; |
1132 | } | 1126 | } |
1127 | bi->page = page; | ||
1133 | } | 1128 | } |
1134 | 1129 | ||
1135 | page_dma = dma_map_page(rx_ring->dev, page, | 1130 | /* map page for use */ |
1136 | page_offset, PAGE_SIZE / 2, | 1131 | dma = dma_map_page(rx_ring->dev, page, 0, |
1137 | DMA_FROM_DEVICE); | 1132 | ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); |
1133 | |||
1134 | /* | ||
1135 | * if mapping failed free memory back to system since | ||
1136 | * there isn't much point in holding memory we can't use | ||
1137 | */ | ||
1138 | if (dma_mapping_error(rx_ring->dev, dma)) { | ||
1139 | put_page(page); | ||
1140 | bi->page = NULL; | ||
1138 | 1141 | ||
1139 | if (dma_mapping_error(rx_ring->dev, page_dma)) { | ||
1140 | rx_ring->rx_stats.alloc_rx_page_failed++; | 1142 | rx_ring->rx_stats.alloc_rx_page_failed++; |
1141 | return false; | 1143 | return false; |
1142 | } | 1144 | } |
1143 | 1145 | ||
1144 | bi->page_dma = page_dma; | 1146 | bi->dma = dma; |
1145 | bi->page_offset = page_offset; | 1147 | bi->page_offset ^= ixgbe_rx_bufsz(rx_ring); |
1148 | |||
1146 | return true; | 1149 | return true; |
1147 | } | 1150 | } |
1148 | 1151 | ||
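ixgbe_alloc_mapped_page() above maps the whole (possibly order-1) page once and then serves two half-page buffers out of it by XOR-ing page_offset, which is what later lets the reuse path hand the other half straight back to the hardware without a fresh allocation or mapping. A condensed sketch of that allocate-map-flip sequence, with illustrative types rather than the driver's:

    #include <linux/mm.h>
    #include <linux/gfp.h>
    #include <linux/dma-mapping.h>

    struct foo_rx_buffer {
            struct page *page;
            dma_addr_t dma;
            unsigned int page_offset;       /* 0 or bufsz, toggled on reuse */
    };

    static bool foo_alloc_mapped_page(struct device *dev,
                                      struct foo_rx_buffer *bi,
                                      unsigned int order)   /* 0, or 1 for FCoE */
    {
            unsigned int bufsz = (PAGE_SIZE / 2) << order;

            if (likely(bi->dma))            /* recycled buffer, nothing to do */
                    return true;

            if (!bi->page) {
                    bi->page = alloc_pages(GFP_ATOMIC | __GFP_COLD, order);
                    if (unlikely(!bi->page))
                            return false;
            }

            bi->dma = dma_map_page(dev, bi->page, 0, PAGE_SIZE << order,
                                   DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, bi->dma)) {
                    /* no point holding memory we cannot use */
                    put_page(bi->page);
                    bi->page = NULL;
                    bi->dma = 0;
                    return false;
            }

            /* start handing out the "other" half; reuse will flip it back */
            bi->page_offset ^= bufsz;
            return true;
    }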
@@ -1157,30 +1160,23 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) | |||
1157 | struct ixgbe_rx_buffer *bi; | 1160 | struct ixgbe_rx_buffer *bi; |
1158 | u16 i = rx_ring->next_to_use; | 1161 | u16 i = rx_ring->next_to_use; |
1159 | 1162 | ||
1160 | /* nothing to do or no valid netdev defined */ | 1163 | /* nothing to do */ |
1161 | if (!cleaned_count || !rx_ring->netdev) | 1164 | if (!cleaned_count) |
1162 | return; | 1165 | return; |
1163 | 1166 | ||
1164 | rx_desc = IXGBE_RX_DESC(rx_ring, i); | 1167 | rx_desc = IXGBE_RX_DESC(rx_ring, i); |
1165 | bi = &rx_ring->rx_buffer_info[i]; | 1168 | bi = &rx_ring->rx_buffer_info[i]; |
1166 | i -= rx_ring->count; | 1169 | i -= rx_ring->count; |
1167 | 1170 | ||
1168 | while (cleaned_count--) { | 1171 | do { |
1169 | if (!ixgbe_alloc_mapped_skb(rx_ring, bi)) | 1172 | if (!ixgbe_alloc_mapped_page(rx_ring, bi)) |
1170 | break; | 1173 | break; |
1171 | 1174 | ||
1172 | /* Refresh the desc even if buffer_addrs didn't change | 1175 | /* |
1173 | * because each write-back erases this info. */ | 1176 | * Refresh the desc even if buffer_addrs didn't change |
1174 | if (ring_is_ps_enabled(rx_ring)) { | 1177 | * because each write-back erases this info. |
1175 | rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); | 1178 | */ |
1176 | 1179 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); | |
1177 | if (!ixgbe_alloc_mapped_page(rx_ring, bi)) | ||
1178 | break; | ||
1179 | |||
1180 | rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); | ||
1181 | } else { | ||
1182 | rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); | ||
1183 | } | ||
1184 | 1180 | ||
1185 | rx_desc++; | 1181 | rx_desc++; |
1186 | bi++; | 1182 | bi++; |
@@ -1193,7 +1189,9 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) | |||
1193 | 1189 | ||
1194 | /* clear the hdr_addr for the next_to_use descriptor */ | 1190 | /* clear the hdr_addr for the next_to_use descriptor */ |
1195 | rx_desc->read.hdr_addr = 0; | 1191 | rx_desc->read.hdr_addr = 0; |
1196 | } | 1192 | |
1193 | cleaned_count--; | ||
1194 | } while (cleaned_count); | ||
1197 | 1195 | ||
1198 | i += rx_ring->count; | 1196 | i += rx_ring->count; |
1199 | 1197 | ||
@@ -1201,90 +1199,6 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) | |||
1201 | ixgbe_release_rx_desc(rx_ring, i); | 1199 | ixgbe_release_rx_desc(rx_ring, i); |
1202 | } | 1200 | } |
1203 | 1201 | ||
1204 | static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) | ||
1205 | { | ||
1206 | /* HW will not DMA in data larger than the given buffer, even if it | ||
1207 | * parses the (NFS, of course) header to be larger. In that case, it | ||
1208 | * fills the header buffer and spills the rest into the page. | ||
1209 | */ | ||
1210 | u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info); | ||
1211 | u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> | ||
1212 | IXGBE_RXDADV_HDRBUFLEN_SHIFT; | ||
1213 | if (hlen > IXGBE_RX_HDR_SIZE) | ||
1214 | hlen = IXGBE_RX_HDR_SIZE; | ||
1215 | return hlen; | ||
1216 | } | ||
1217 | |||
1218 | /** | ||
1219 | * ixgbe_merge_active_tail - merge active tail into lro skb | ||
1220 | * @tail: pointer to active tail in frag_list | ||
1221 | * | ||
1222 | * This function merges the length and data of an active tail into the | ||
1223 | * skb containing the frag_list. It resets the tail's pointer to the head, | ||
1224 | * but it leaves the heads pointer to tail intact. | ||
1225 | **/ | ||
1226 | static inline struct sk_buff *ixgbe_merge_active_tail(struct sk_buff *tail) | ||
1227 | { | ||
1228 | struct sk_buff *head = IXGBE_CB(tail)->head; | ||
1229 | |||
1230 | if (!head) | ||
1231 | return tail; | ||
1232 | |||
1233 | head->len += tail->len; | ||
1234 | head->data_len += tail->len; | ||
1235 | head->truesize += tail->len; | ||
1236 | |||
1237 | IXGBE_CB(tail)->head = NULL; | ||
1238 | |||
1239 | return head; | ||
1240 | } | ||
1241 | |||
1242 | /** | ||
1243 | * ixgbe_add_active_tail - adds an active tail into the skb frag_list | ||
1244 | * @head: pointer to the start of the skb | ||
1245 | * @tail: pointer to active tail to add to frag_list | ||
1246 | * | ||
1247 | * This function adds an active tail to the end of the frag list. This tail | ||
1248 | * will still be receiving data so we cannot yet ad it's stats to the main | ||
1249 | * skb. That is done via ixgbe_merge_active_tail. | ||
1250 | **/ | ||
1251 | static inline void ixgbe_add_active_tail(struct sk_buff *head, | ||
1252 | struct sk_buff *tail) | ||
1253 | { | ||
1254 | struct sk_buff *old_tail = IXGBE_CB(head)->tail; | ||
1255 | |||
1256 | if (old_tail) { | ||
1257 | ixgbe_merge_active_tail(old_tail); | ||
1258 | old_tail->next = tail; | ||
1259 | } else { | ||
1260 | skb_shinfo(head)->frag_list = tail; | ||
1261 | } | ||
1262 | |||
1263 | IXGBE_CB(tail)->head = head; | ||
1264 | IXGBE_CB(head)->tail = tail; | ||
1265 | } | ||
1266 | |||
1267 | /** | ||
1268 | * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb | ||
1269 | * @head: pointer to head of an active frag list | ||
1270 | * | ||
1271 | * This function will clear the frag_tail_tracker pointer on an active | ||
1272 | * frag_list and returns true if the pointer was actually set | ||
1273 | **/ | ||
1274 | static inline bool ixgbe_close_active_frag_list(struct sk_buff *head) | ||
1275 | { | ||
1276 | struct sk_buff *tail = IXGBE_CB(head)->tail; | ||
1277 | |||
1278 | if (!tail) | ||
1279 | return false; | ||
1280 | |||
1281 | ixgbe_merge_active_tail(tail); | ||
1282 | |||
1283 | IXGBE_CB(head)->tail = NULL; | ||
1284 | |||
1285 | return true; | ||
1286 | } | ||
1287 | |||
1288 | /** | 1202 | /** |
1289 | * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE | 1203 | * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE |
1290 | * @data: pointer to the start of the headers | 1204 | * @data: pointer to the start of the headers |
@@ -1346,7 +1260,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, | |||
1346 | /* record next protocol */ | 1260 | /* record next protocol */ |
1347 | nexthdr = hdr.ipv4->protocol; | 1261 | nexthdr = hdr.ipv4->protocol; |
1348 | hdr.network += hlen; | 1262 | hdr.network += hlen; |
1349 | #ifdef CONFIG_FCOE | 1263 | #ifdef IXGBE_FCOE |
1350 | } else if (protocol == __constant_htons(ETH_P_FCOE)) { | 1264 | } else if (protocol == __constant_htons(ETH_P_FCOE)) { |
1351 | if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) | 1265 | if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) |
1352 | return max_len; | 1266 | return max_len; |
@@ -1409,7 +1323,7 @@ static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring, | |||
1409 | static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, | 1323 | static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, |
1410 | struct sk_buff *skb) | 1324 | struct sk_buff *skb) |
1411 | { | 1325 | { |
1412 | u16 hdr_len = ixgbe_get_headlen(skb->data, skb_headlen(skb)); | 1326 | u16 hdr_len = skb_headlen(skb); |
1413 | 1327 | ||
1414 | /* set gso_size to avoid messing up TCP MSS */ | 1328 | /* set gso_size to avoid messing up TCP MSS */ |
1415 | skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), | 1329 | skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), |
@@ -1473,149 +1387,346 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, | |||
1473 | netif_rx(skb); | 1387 | netif_rx(skb); |
1474 | } | 1388 | } |
1475 | 1389 | ||
1390 | /** | ||
1391 | * ixgbe_is_non_eop - process handling of non-EOP buffers | ||
1392 | * @rx_ring: Rx ring being processed | ||
1393 | * @rx_desc: Rx descriptor for current buffer | ||
1394 | * @skb: Current socket buffer containing buffer in progress | ||
1395 | * | ||
1396 | * This function updates next to clean. If the buffer is an EOP buffer | ||
1397 | * this function exits returning false, otherwise it will place the | ||
1398 | * sk_buff in the next buffer to be chained and return true indicating | ||
1399 | * that this is in fact a non-EOP buffer. | ||
1400 | **/ | ||
1401 | static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, | ||
1402 | union ixgbe_adv_rx_desc *rx_desc, | ||
1403 | struct sk_buff *skb) | ||
1404 | { | ||
1405 | u32 ntc = rx_ring->next_to_clean + 1; | ||
1406 | |||
1407 | /* fetch, update, and store next to clean */ | ||
1408 | ntc = (ntc < rx_ring->count) ? ntc : 0; | ||
1409 | rx_ring->next_to_clean = ntc; | ||
1410 | |||
1411 | prefetch(IXGBE_RX_DESC(rx_ring, ntc)); | ||
1412 | |||
1413 | if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) | ||
1414 | return false; | ||
1415 | |||
1416 | /* append_cnt indicates packet is RSC, if so fetch nextp */ | ||
1417 | if (IXGBE_CB(skb)->append_cnt) { | ||
1418 | ntc = le32_to_cpu(rx_desc->wb.upper.status_error); | ||
1419 | ntc &= IXGBE_RXDADV_NEXTP_MASK; | ||
1420 | ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; | ||
1421 | } | ||
1422 | |||
1423 | /* place skb in next buffer to be received */ | ||
1424 | rx_ring->rx_buffer_info[ntc].skb = skb; | ||
1425 | rx_ring->rx_stats.non_eop_descs++; | ||
1426 | |||
1427 | return true; | ||
1428 | } | ||
1429 | |||
1430 | /** | ||
1431 | * ixgbe_cleanup_headers - Correct corrupted or empty headers | ||
1432 | * @rx_ring: rx descriptor ring packet is being transacted on | ||
1433 | * @rx_desc: pointer to the EOP Rx descriptor | ||
1434 | * @skb: pointer to current skb being fixed | ||
1435 | * | ||
1436 | * Check for corrupted packet headers caused by senders on the local L2 | ||
1437 | * embedded NIC switch not setting up their Tx Descriptors right. These | ||
1438 | * should be very rare. | ||
1439 | * | ||
1440 | * Also address the case where we are pulling data in on pages only | ||
1441 | * and as such no data is present in the skb header. | ||
1442 | * | ||
1443 | * In addition if skb is not at least 60 bytes we need to pad it so that | ||
1444 | * it is large enough to qualify as a valid Ethernet frame. | ||
1445 | * | ||
1446 | * Returns true if an error was encountered and skb was freed. | ||
1447 | **/ | ||
1448 | static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, | ||
1449 | union ixgbe_adv_rx_desc *rx_desc, | ||
1450 | struct sk_buff *skb) | ||
1451 | { | ||
1452 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | ||
1453 | struct net_device *netdev = rx_ring->netdev; | ||
1454 | unsigned char *va; | ||
1455 | unsigned int pull_len; | ||
1456 | |||
1457 | /* if the page was released unmap it, else just sync our portion */ | ||
1458 | if (unlikely(IXGBE_CB(skb)->page_released)) { | ||
1459 | dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, | ||
1460 | ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); | ||
1461 | IXGBE_CB(skb)->page_released = false; | ||
1462 | } else { | ||
1463 | dma_sync_single_range_for_cpu(rx_ring->dev, | ||
1464 | IXGBE_CB(skb)->dma, | ||
1465 | frag->page_offset, | ||
1466 | ixgbe_rx_bufsz(rx_ring), | ||
1467 | DMA_FROM_DEVICE); | ||
1468 | } | ||
1469 | IXGBE_CB(skb)->dma = 0; | ||
1470 | |||
1471 | /* verify that the packet does not have any known errors */ | ||
1472 | if (unlikely(ixgbe_test_staterr(rx_desc, | ||
1473 | IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && | ||
1474 | !(netdev->features & NETIF_F_RXALL))) { | ||
1475 | dev_kfree_skb_any(skb); | ||
1476 | return true; | ||
1477 | } | ||
1478 | |||
1479 | /* | ||
1480 | * it is valid to use page_address instead of kmap since we are | ||
1481 | * working with pages allocated out of the lomem pool per | ||
1482 | * alloc_page(GFP_ATOMIC) | ||
1483 | */ | ||
1484 | va = skb_frag_address(frag); | ||
1485 | |||
1486 | /* | ||
1487 | * we need the header to contain the greater of either ETH_HLEN or | ||
1488 | * 60 bytes if the skb->len is less than 60 for skb_pad. | ||
1489 | */ | ||
1490 | pull_len = skb_frag_size(frag); | ||
1491 | if (pull_len > 256) | ||
1492 | pull_len = ixgbe_get_headlen(va, pull_len); | ||
1493 | |||
1494 | /* align pull length to size of long to optimize memcpy performance */ | ||
1495 | skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); | ||
1496 | |||
1497 | /* update all of the pointers */ | ||
1498 | skb_frag_size_sub(frag, pull_len); | ||
1499 | frag->page_offset += pull_len; | ||
1500 | skb->data_len -= pull_len; | ||
1501 | skb->tail += pull_len; | ||
1502 | |||
1503 | /* | ||
1504 | * if we sucked the frag empty then we should free it, | ||
1505 | * if there are other frags here something is screwed up in hardware | ||
1506 | */ | ||
1507 | if (skb_frag_size(frag) == 0) { | ||
1508 | BUG_ON(skb_shinfo(skb)->nr_frags != 1); | ||
1509 | skb_shinfo(skb)->nr_frags = 0; | ||
1510 | __skb_frag_unref(frag); | ||
1511 | skb->truesize -= ixgbe_rx_bufsz(rx_ring); | ||
1512 | } | ||
1513 | |||
1514 | /* if skb_pad returns an error the skb was freed */ | ||
1515 | if (unlikely(skb->len < 60)) { | ||
1516 | int pad_len = 60 - skb->len; | ||
1517 | |||
1518 | if (skb_pad(skb, pad_len)) | ||
1519 | return true; | ||
1520 | __skb_put(skb, pad_len); | ||
1521 | } | ||
1522 | |||
1523 | return false; | ||
1524 | } | ||
1525 | |||
1526 | /** | ||
1527 | * ixgbe_can_reuse_page - determine if we can reuse a page | ||
1528 | * @rx_buffer: pointer to rx_buffer containing the page we want to reuse | ||
1529 | * | ||
1530 | * Returns true if page can be reused in another Rx buffer | ||
1531 | **/ | ||
1532 | static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer) | ||
1533 | { | ||
1534 | struct page *page = rx_buffer->page; | ||
1535 | |||
1536 | /* if we are only owner of page and it is local we can reuse it */ | ||
1537 | return likely(page_count(page) == 1) && | ||
1538 | likely(page_to_nid(page) == numa_node_id()); | ||
1539 | } | ||
1540 | |||
1541 | /** | ||
1542 | * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring | ||
1543 | * @rx_ring: rx descriptor ring to store buffers on | ||
1544 | * @old_buff: donor buffer to have page reused | ||
1545 | * | ||
1546 | * Syncronizes page for reuse by the adapter | ||
1547 | **/ | ||
1548 | static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, | ||
1549 | struct ixgbe_rx_buffer *old_buff) | ||
1550 | { | ||
1551 | struct ixgbe_rx_buffer *new_buff; | ||
1552 | u16 nta = rx_ring->next_to_alloc; | ||
1553 | u16 bufsz = ixgbe_rx_bufsz(rx_ring); | ||
1554 | |||
1555 | new_buff = &rx_ring->rx_buffer_info[nta]; | ||
1556 | |||
1557 | /* update, and store next to alloc */ | ||
1558 | nta++; | ||
1559 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | ||
1560 | |||
1561 | /* transfer page from old buffer to new buffer */ | ||
1562 | new_buff->page = old_buff->page; | ||
1563 | new_buff->dma = old_buff->dma; | ||
1564 | |||
1565 | /* flip page offset to other buffer and store to new_buff */ | ||
1566 | new_buff->page_offset = old_buff->page_offset ^ bufsz; | ||
1567 | |||
1568 | /* sync the buffer for use by the device */ | ||
1569 | dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, | ||
1570 | new_buff->page_offset, bufsz, | ||
1571 | DMA_FROM_DEVICE); | ||
1572 | |||
1573 | /* bump ref count on page before it is given to the stack */ | ||
1574 | get_page(new_buff->page); | ||
1575 | } | ||
1576 | |||
1577 | /** | ||
1578 | * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff | ||
1579 | * @rx_ring: rx descriptor ring to transact packets on | ||
1580 | * @rx_buffer: buffer containing page to add | ||
1581 | * @rx_desc: descriptor containing length of buffer written by hardware | ||
1582 | * @skb: sk_buff to place the data into | ||
1583 | * | ||
1584 | * This function is based on skb_add_rx_frag. I would have used that | ||
1585 | * function however it doesn't handle the truesize case correctly since we | ||
1586 | * are allocating more memory than might be used for a single receive. | ||
1587 | **/ | ||
1588 | static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, | ||
1589 | struct ixgbe_rx_buffer *rx_buffer, | ||
1590 | struct sk_buff *skb, int size) | ||
1591 | { | ||
1592 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | ||
1593 | rx_buffer->page, rx_buffer->page_offset, | ||
1594 | size); | ||
1595 | skb->len += size; | ||
1596 | skb->data_len += size; | ||
1597 | skb->truesize += ixgbe_rx_bufsz(rx_ring); | ||
1598 | } | ||
1599 | |||
1600 | /** | ||
1601 | * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf | ||
1602 | * @q_vector: structure containing interrupt and ring information | ||
1603 | * @rx_ring: rx descriptor ring to transact packets on | ||
1604 | * @budget: Total limit on number of packets to process | ||
1605 | * | ||
1606 | * This function provides a "bounce buffer" approach to Rx interrupt | ||
1607 | * processing. The advantage to this is that on systems that have | ||
1608 | * expensive overhead for IOMMU access this provides a means of avoiding | ||
1609 | * it by maintaining the mapping of the page to the syste. | ||
1610 | * | ||
1611 | * Returns true if all work is completed without reaching budget | ||
1612 | **/ | ||
1476 | static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | 1613 | static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, |
1477 | struct ixgbe_ring *rx_ring, | 1614 | struct ixgbe_ring *rx_ring, |
1478 | int budget) | 1615 | int budget) |
1479 | { | 1616 | { |
1480 | union ixgbe_adv_rx_desc *rx_desc, *next_rxd; | ||
1481 | struct ixgbe_rx_buffer *rx_buffer_info; | ||
1482 | struct sk_buff *skb; | ||
1483 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; | 1617 | unsigned int total_rx_bytes = 0, total_rx_packets = 0; |
1484 | const int current_node = numa_node_id(); | ||
1485 | #ifdef IXGBE_FCOE | 1618 | #ifdef IXGBE_FCOE |
1486 | struct ixgbe_adapter *adapter = q_vector->adapter; | 1619 | struct ixgbe_adapter *adapter = q_vector->adapter; |
1487 | int ddp_bytes = 0; | 1620 | int ddp_bytes = 0; |
1488 | #endif /* IXGBE_FCOE */ | 1621 | #endif /* IXGBE_FCOE */ |
1489 | u16 i; | 1622 | u16 cleaned_count = ixgbe_desc_unused(rx_ring); |
1490 | u16 cleaned_count = 0; | ||
1491 | 1623 | ||
1492 | i = rx_ring->next_to_clean; | 1624 | do { |
1493 | rx_desc = IXGBE_RX_DESC(rx_ring, i); | 1625 | struct ixgbe_rx_buffer *rx_buffer; |
1626 | union ixgbe_adv_rx_desc *rx_desc; | ||
1627 | struct sk_buff *skb; | ||
1628 | struct page *page; | ||
1629 | u16 ntc; | ||
1494 | 1630 | ||
1495 | while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { | 1631 | /* return some buffers to hardware, one at a time is too slow */ |
1496 | u32 upper_len = 0; | 1632 | if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { |
1633 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); | ||
1634 | cleaned_count = 0; | ||
1635 | } | ||
1497 | 1636 | ||
1498 | rmb(); /* read descriptor and rx_buffer_info after status DD */ | 1637 | ntc = rx_ring->next_to_clean; |
1638 | rx_desc = IXGBE_RX_DESC(rx_ring, ntc); | ||
1639 | rx_buffer = &rx_ring->rx_buffer_info[ntc]; | ||
1499 | 1640 | ||
1500 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | 1641 | if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) |
1642 | break; | ||
1501 | 1643 | ||
1502 | skb = rx_buffer_info->skb; | 1644 | /* |
1503 | rx_buffer_info->skb = NULL; | 1645 | * This memory barrier is needed to keep us from reading |
1504 | prefetch(skb->data); | 1646 | * any other fields out of the rx_desc until we know the |
1647 | * RXD_STAT_DD bit is set | ||
1648 | */ | ||
1649 | rmb(); | ||
1505 | 1650 | ||
1506 | /* linear means we are building an skb from multiple pages */ | 1651 | page = rx_buffer->page; |
1507 | if (!skb_is_nonlinear(skb)) { | 1652 | prefetchw(page); |
1508 | u16 hlen; | 1653 | |
1509 | if (ring_is_ps_enabled(rx_ring)) { | 1654 | skb = rx_buffer->skb; |
1510 | hlen = ixgbe_get_hlen(rx_desc); | 1655 | |
1511 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | 1656 | if (likely(!skb)) { |
1512 | } else { | 1657 | void *page_addr = page_address(page) + |
1513 | hlen = le16_to_cpu(rx_desc->wb.upper.length); | 1658 | rx_buffer->page_offset; |
1659 | |||
1660 | /* prefetch first cache line of first page */ | ||
1661 | prefetch(page_addr); | ||
1662 | #if L1_CACHE_BYTES < 128 | ||
1663 | prefetch(page_addr + L1_CACHE_BYTES); | ||
1664 | #endif | ||
1665 | |||
1666 | /* allocate a skb to store the frags */ | ||
1667 | skb = netdev_alloc_skb_ip_align(rx_ring->netdev, | ||
1668 | IXGBE_RX_HDR_SIZE); | ||
1669 | if (unlikely(!skb)) { | ||
1670 | rx_ring->rx_stats.alloc_rx_buff_failed++; | ||
1671 | break; | ||
1514 | } | 1672 | } |
1515 | 1673 | ||
1516 | skb_put(skb, hlen); | 1674 | /* |
1675 | * we will be copying header into skb->data in | ||
1676 | * pskb_may_pull so it is in our interest to prefetch | ||
1677 | * it now to avoid a possible cache miss | ||
1678 | */ | ||
1679 | prefetchw(skb->data); | ||
1517 | 1680 | ||
1518 | /* | 1681 | /* |
1519 | * Delay unmapping of the first packet. It carries the | 1682 | * Delay unmapping of the first packet. It carries the |
1520 | * header information; HW may still access the header | 1683 | * header information; HW may still access the header |
1521 | * after writeback. Only unmap it when EOP is reached | 1684 | * after the writeback. Only unmap it when EOP is |
1685 | * reached | ||
1522 | */ | 1686 | */ |
1523 | if (!IXGBE_CB(skb)->head) { | 1687 | IXGBE_CB(skb)->dma = rx_buffer->dma; |
1524 | IXGBE_CB(skb)->delay_unmap = true; | ||
1525 | IXGBE_CB(skb)->dma = rx_buffer_info->dma; | ||
1526 | } else { | ||
1527 | skb = ixgbe_merge_active_tail(skb); | ||
1528 | dma_unmap_single(rx_ring->dev, | ||
1529 | rx_buffer_info->dma, | ||
1530 | rx_ring->rx_buf_len, | ||
1531 | DMA_FROM_DEVICE); | ||
1532 | } | ||
1533 | rx_buffer_info->dma = 0; | ||
1534 | } else { | 1688 | } else { |
1535 | /* assume packet split since header is unmapped */ | 1689 | /* we are reusing so sync this buffer for CPU use */ |
1536 | upper_len = le16_to_cpu(rx_desc->wb.upper.length); | 1690 | dma_sync_single_range_for_cpu(rx_ring->dev, |
1691 | rx_buffer->dma, | ||
1692 | rx_buffer->page_offset, | ||
1693 | ixgbe_rx_bufsz(rx_ring), | ||
1694 | DMA_FROM_DEVICE); | ||
1537 | } | 1695 | } |
1538 | 1696 | ||
1539 | if (upper_len) { | 1697 | /* pull page into skb */ |
1540 | dma_unmap_page(rx_ring->dev, | 1698 | ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, |
1541 | rx_buffer_info->page_dma, | 1699 | le16_to_cpu(rx_desc->wb.upper.length)); |
1542 | PAGE_SIZE / 2, | ||
1543 | DMA_FROM_DEVICE); | ||
1544 | rx_buffer_info->page_dma = 0; | ||
1545 | skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, | ||
1546 | rx_buffer_info->page, | ||
1547 | rx_buffer_info->page_offset, | ||
1548 | upper_len); | ||
1549 | |||
1550 | if ((page_count(rx_buffer_info->page) == 1) && | ||
1551 | (page_to_nid(rx_buffer_info->page) == current_node)) | ||
1552 | get_page(rx_buffer_info->page); | ||
1553 | else | ||
1554 | rx_buffer_info->page = NULL; | ||
1555 | 1700 | ||
1556 | skb->len += upper_len; | 1701 | if (ixgbe_can_reuse_page(rx_buffer)) { |
1557 | skb->data_len += upper_len; | 1702 | /* hand second half of page back to the ring */ |
1558 | skb->truesize += PAGE_SIZE / 2; | 1703 | ixgbe_reuse_rx_page(rx_ring, rx_buffer); |
1704 | } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { | ||
1705 | /* the page has been released from the ring */ | ||
1706 | IXGBE_CB(skb)->page_released = true; | ||
1707 | } else { | ||
1708 | /* we are not reusing the buffer so unmap it */ | ||
1709 | dma_unmap_page(rx_ring->dev, rx_buffer->dma, | ||
1710 | ixgbe_rx_pg_size(rx_ring), | ||
1711 | DMA_FROM_DEVICE); | ||
1559 | } | 1712 | } |
1560 | 1713 | ||
1561 | ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb); | 1714 | /* clear contents of buffer_info */ |
1715 | rx_buffer->skb = NULL; | ||
1716 | rx_buffer->dma = 0; | ||
1717 | rx_buffer->page = NULL; | ||
1562 | 1718 | ||
1563 | i++; | 1719 | ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb); |
1564 | if (i == rx_ring->count) | ||
1565 | i = 0; | ||
1566 | 1720 | ||
1567 | next_rxd = IXGBE_RX_DESC(rx_ring, i); | ||
1568 | prefetch(next_rxd); | ||
1569 | cleaned_count++; | 1721 | cleaned_count++; |
1570 | 1722 | ||
1571 | if ((!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) { | 1723 | /* place incomplete frames back on ring for completion */ |
1572 | struct ixgbe_rx_buffer *next_buffer; | 1724 | if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) |
1573 | u32 nextp; | 1725 | continue; |
1574 | |||
1575 | if (IXGBE_CB(skb)->append_cnt) { | ||
1576 | nextp = le32_to_cpu( | ||
1577 | rx_desc->wb.upper.status_error); | ||
1578 | nextp >>= IXGBE_RXDADV_NEXTP_SHIFT; | ||
1579 | } else { | ||
1580 | nextp = i; | ||
1581 | } | ||
1582 | |||
1583 | next_buffer = &rx_ring->rx_buffer_info[nextp]; | ||
1584 | |||
1585 | if (ring_is_ps_enabled(rx_ring)) { | ||
1586 | rx_buffer_info->skb = next_buffer->skb; | ||
1587 | rx_buffer_info->dma = next_buffer->dma; | ||
1588 | next_buffer->skb = skb; | ||
1589 | next_buffer->dma = 0; | ||
1590 | } else { | ||
1591 | struct sk_buff *next_skb = next_buffer->skb; | ||
1592 | ixgbe_add_active_tail(skb, next_skb); | ||
1593 | IXGBE_CB(next_skb)->head = skb; | ||
1594 | } | ||
1595 | rx_ring->rx_stats.non_eop_descs++; | ||
1596 | goto next_desc; | ||
1597 | } | ||
1598 | |||
1599 | dma_unmap_single(rx_ring->dev, | ||
1600 | IXGBE_CB(skb)->dma, | ||
1601 | rx_ring->rx_buf_len, | ||
1602 | DMA_FROM_DEVICE); | ||
1603 | IXGBE_CB(skb)->dma = 0; | ||
1604 | IXGBE_CB(skb)->delay_unmap = false; | ||
1605 | |||
1606 | if (ixgbe_close_active_frag_list(skb) && | ||
1607 | !IXGBE_CB(skb)->append_cnt) { | ||
1608 | /* if we got here without RSC the packet is invalid */ | ||
1609 | dev_kfree_skb_any(skb); | ||
1610 | goto next_desc; | ||
1611 | } | ||
1612 | 1726 | ||
1613 | /* ERR_MASK will only have valid bits if EOP set */ | 1727 | /* verify the packet layout is correct */ |
1614 | if (unlikely(ixgbe_test_staterr(rx_desc, | 1728 | if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) |
1615 | IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { | 1729 | continue; |
1616 | dev_kfree_skb_any(skb); | ||
1617 | goto next_desc; | ||
1618 | } | ||
1619 | 1730 | ||
1620 | /* probably a little skewed due to removing CRC */ | 1731 | /* probably a little skewed due to removing CRC */ |
1621 | total_rx_bytes += skb->len; | 1732 | total_rx_bytes += skb->len; |
@@ -1630,32 +1741,16 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, | |||
1630 | ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); | 1741 | ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); |
1631 | if (!ddp_bytes) { | 1742 | if (!ddp_bytes) { |
1632 | dev_kfree_skb_any(skb); | 1743 | dev_kfree_skb_any(skb); |
1633 | goto next_desc; | 1744 | continue; |
1634 | } | 1745 | } |
1635 | } | 1746 | } |
1747 | |||
1636 | #endif /* IXGBE_FCOE */ | 1748 | #endif /* IXGBE_FCOE */ |
1637 | ixgbe_rx_skb(q_vector, skb); | 1749 | ixgbe_rx_skb(q_vector, skb); |
1638 | 1750 | ||
1751 | /* update budget accounting */ | ||
1639 | budget--; | 1752 | budget--; |
1640 | next_desc: | 1753 | } while (likely(budget)); |
1641 | if (!budget) | ||
1642 | break; | ||
1643 | |||
1644 | /* return some buffers to hardware, one at a time is too slow */ | ||
1645 | if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { | ||
1646 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); | ||
1647 | cleaned_count = 0; | ||
1648 | } | ||
1649 | |||
1650 | /* use prefetched values */ | ||
1651 | rx_desc = next_rxd; | ||
1652 | } | ||
1653 | |||
1654 | rx_ring->next_to_clean = i; | ||
1655 | cleaned_count = ixgbe_desc_unused(rx_ring); | ||
1656 | |||
1657 | if (cleaned_count) | ||
1658 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); | ||
1659 | 1754 | ||
1660 | #ifdef IXGBE_FCOE | 1755 | #ifdef IXGBE_FCOE |
1661 | /* include DDPed FCoE data */ | 1756 | /* include DDPed FCoE data */ |
@@ -1670,8 +1765,8 @@ next_desc: | |||
1670 | total_rx_bytes += ddp_bytes; | 1765 | total_rx_bytes += ddp_bytes; |
1671 | total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); | 1766 | total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); |
1672 | } | 1767 | } |
1673 | #endif /* IXGBE_FCOE */ | ||
1674 | 1768 | ||
1769 | #endif /* IXGBE_FCOE */ | ||
1675 | u64_stats_update_begin(&rx_ring->syncp); | 1770 | u64_stats_update_begin(&rx_ring->syncp); |
1676 | rx_ring->stats.packets += total_rx_packets; | 1771 | rx_ring->stats.packets += total_rx_packets; |
1677 | rx_ring->stats.bytes += total_rx_bytes; | 1772 | rx_ring->stats.bytes += total_rx_bytes; |
@@ -1679,6 +1774,9 @@ next_desc: | |||
1679 | q_vector->rx.total_packets += total_rx_packets; | 1774 | q_vector->rx.total_packets += total_rx_packets; |
1680 | q_vector->rx.total_bytes += total_rx_bytes; | 1775 | q_vector->rx.total_bytes += total_rx_bytes; |
1681 | 1776 | ||
1777 | if (cleaned_count) | ||
1778 | ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); | ||
1779 | |||
1682 | return !!budget; | 1780 | return !!budget; |
1683 | } | 1781 | } |
1684 | 1782 | ||
@@ -2634,18 +2732,12 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, | |||
2634 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & | 2732 | srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & |
2635 | IXGBE_SRRCTL_BSIZEHDR_MASK; | 2733 | IXGBE_SRRCTL_BSIZEHDR_MASK; |
2636 | 2734 | ||
2637 | if (ring_is_ps_enabled(rx_ring)) { | 2735 | #if PAGE_SIZE > IXGBE_MAX_RXBUFFER |
2638 | #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER | 2736 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
2639 | srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | ||
2640 | #else | 2737 | #else |
2641 | srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; | 2738 | srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; |
2642 | #endif | 2739 | #endif |
2643 | srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; | 2740 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; |
2644 | } else { | ||
2645 | srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> | ||
2646 | IXGBE_SRRCTL_BSIZEPKT_SHIFT; | ||
2647 | srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; | ||
2648 | } | ||
2649 | 2741 | ||
2650 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); | 2742 | IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); |
2651 | } | 2743 | } |
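The BSIZEPKT shift above encodes the packet-buffer size in 1 KB units. A small sketch of that encoding, assuming a shift of 10 and a 2 KB buffer (both values are assumptions here; the real constants live in ixgbe_type.h and ixgbe.h):

	#include <assert.h>

	#define BSIZEPKT_SHIFT 10          /* assumed: field counts 1 KB units */

	int main(void)
	{
		unsigned int bufsz = 2048;                 /* assumed Rx buffer size */
		unsigned int field = bufsz >> BSIZEPKT_SHIFT;

		assert(field == 2);                        /* 2 KB -> "2" in SRRCTL */
		return 0;
	}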
@@ -2728,13 +2820,11 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, | |||
2728 | { | 2820 | { |
2729 | struct ixgbe_hw *hw = &adapter->hw; | 2821 | struct ixgbe_hw *hw = &adapter->hw; |
2730 | u32 rscctrl; | 2822 | u32 rscctrl; |
2731 | int rx_buf_len; | ||
2732 | u8 reg_idx = ring->reg_idx; | 2823 | u8 reg_idx = ring->reg_idx; |
2733 | 2824 | ||
2734 | if (!ring_is_rsc_enabled(ring)) | 2825 | if (!ring_is_rsc_enabled(ring)) |
2735 | return; | 2826 | return; |
2736 | 2827 | ||
2737 | rx_buf_len = ring->rx_buf_len; | ||
2738 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); | 2828 | rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); |
2739 | rscctrl |= IXGBE_RSCCTL_RSCEN; | 2829 | rscctrl |= IXGBE_RSCCTL_RSCEN; |
2740 | /* | 2830 | /* |
@@ -2742,24 +2832,13 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, | |||
2742 | * total size of max desc * buf_len is not greater | 2832 | * total size of max desc * buf_len is not greater |
2743 | * than 65536 | 2833 | * than 65536 |
2744 | */ | 2834 | */ |
2745 | if (ring_is_ps_enabled(ring)) { | 2835 | #if (PAGE_SIZE <= 8192) |
2746 | #if (PAGE_SIZE < 8192) | 2836 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; |
2747 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | 2837 | #elif (PAGE_SIZE <= 16384) |
2748 | #elif (PAGE_SIZE < 16384) | 2838 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; |
2749 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; | ||
2750 | #elif (PAGE_SIZE < 32768) | ||
2751 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; | ||
2752 | #else | 2839 | #else |
2753 | rscctrl |= IXGBE_RSCCTL_MAXDESC_1; | 2840 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; |
2754 | #endif | 2841 | #endif |
2755 | } else { | ||
2756 | if (rx_buf_len <= IXGBE_RXBUFFER_4K) | ||
2757 | rscctrl |= IXGBE_RSCCTL_MAXDESC_16; | ||
2758 | else if (rx_buf_len <= IXGBE_RXBUFFER_8K) | ||
2759 | rscctrl |= IXGBE_RSCCTL_MAXDESC_8; | ||
2760 | else | ||
2761 | rscctrl |= IXGBE_RSCCTL_MAXDESC_4; | ||
2762 | } | ||
2763 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); | 2842 | IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); |
2764 | } | 2843 | } |
2765 | 2844 | ||
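The MAXDESC selection keeps the worst-case RSC aggregation, descriptor count times buffer size, under the 65536-byte limit the comment mentions. A quick check under the assumption of 4 KB pages with 2 KB half-page buffers, where the MAXDESC_16 case applies:

	#include <assert.h>

	int main(void)
	{
		unsigned int bufsz = 2048;      /* assumed buffer size for 4 KB pages */
		unsigned int max_desc = 16;     /* IXGBE_RSCCTL_MAXDESC_16 case */

		assert(max_desc * bufsz <= 65536);  /* within the RSC size limit */
		return 0;
	}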
@@ -2976,23 +3055,10 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | |||
2976 | struct ixgbe_hw *hw = &adapter->hw; | 3055 | struct ixgbe_hw *hw = &adapter->hw; |
2977 | struct net_device *netdev = adapter->netdev; | 3056 | struct net_device *netdev = adapter->netdev; |
2978 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; | 3057 | int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; |
2979 | int rx_buf_len; | ||
2980 | struct ixgbe_ring *rx_ring; | 3058 | struct ixgbe_ring *rx_ring; |
2981 | int i; | 3059 | int i; |
2982 | u32 mhadd, hlreg0; | 3060 | u32 mhadd, hlreg0; |
2983 | 3061 | ||
2984 | /* Decide whether to use packet split mode or not */ | ||
2985 | /* On by default */ | ||
2986 | adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; | ||
2987 | |||
2988 | /* Do not use packet split if we're in SR-IOV Mode */ | ||
2989 | if (adapter->num_vfs) | ||
2990 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; | ||
2991 | |||
2992 | /* Disable packet split due to 82599 erratum #45 */ | ||
2993 | if (hw->mac.type == ixgbe_mac_82599EB) | ||
2994 | adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; | ||
2995 | |||
2996 | #ifdef IXGBE_FCOE | 3062 | #ifdef IXGBE_FCOE |
2997 | /* adjust max frame to be able to do baby jumbo for FCoE */ | 3063 | /* adjust max frame to be able to do baby jumbo for FCoE */ |
2998 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && | 3064 | if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && |
@@ -3011,27 +3077,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | |||
3011 | /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ | 3077 | /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ |
3012 | max_frame += VLAN_HLEN; | 3078 | max_frame += VLAN_HLEN; |
3013 | 3079 | ||
3014 | /* Set the RX buffer length according to the mode */ | ||
3015 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { | ||
3016 | rx_buf_len = IXGBE_RX_HDR_SIZE; | ||
3017 | } else { | ||
3018 | if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && | ||
3019 | (netdev->mtu <= ETH_DATA_LEN)) | ||
3020 | rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; | ||
3021 | /* | ||
3022 | * Make best use of allocation by using all but 1K of a | ||
3023 | * power of 2 allocation that will be used for skb->head. | ||
3024 | */ | ||
3025 | else if (max_frame <= IXGBE_RXBUFFER_3K) | ||
3026 | rx_buf_len = IXGBE_RXBUFFER_3K; | ||
3027 | else if (max_frame <= IXGBE_RXBUFFER_7K) | ||
3028 | rx_buf_len = IXGBE_RXBUFFER_7K; | ||
3029 | else if (max_frame <= IXGBE_RXBUFFER_15K) | ||
3030 | rx_buf_len = IXGBE_RXBUFFER_15K; | ||
3031 | else | ||
3032 | rx_buf_len = IXGBE_MAX_RXBUFFER; | ||
3033 | } | ||
3034 | |||
3035 | hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); | 3080 | hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); |
3036 | /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ | 3081 | /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ |
3037 | hlreg0 |= IXGBE_HLREG0_JUMBOEN; | 3082 | hlreg0 |= IXGBE_HLREG0_JUMBOEN; |
@@ -3043,32 +3088,16 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) | |||
3043 | */ | 3088 | */ |
3044 | for (i = 0; i < adapter->num_rx_queues; i++) { | 3089 | for (i = 0; i < adapter->num_rx_queues; i++) { |
3045 | rx_ring = adapter->rx_ring[i]; | 3090 | rx_ring = adapter->rx_ring[i]; |
3046 | rx_ring->rx_buf_len = rx_buf_len; | ||
3047 | |||
3048 | if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) | ||
3049 | set_ring_ps_enabled(rx_ring); | ||
3050 | else | ||
3051 | clear_ring_ps_enabled(rx_ring); | ||
3052 | |||
3053 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) | 3091 | if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) |
3054 | set_ring_rsc_enabled(rx_ring); | 3092 | set_ring_rsc_enabled(rx_ring); |
3055 | else | 3093 | else |
3056 | clear_ring_rsc_enabled(rx_ring); | 3094 | clear_ring_rsc_enabled(rx_ring); |
3057 | |||
3058 | #ifdef IXGBE_FCOE | 3095 | #ifdef IXGBE_FCOE |
3059 | if (netdev->features & NETIF_F_FCOE_MTU) { | 3096 | if (netdev->features & NETIF_F_FCOE_MTU) { |
3060 | struct ixgbe_ring_feature *f; | 3097 | struct ixgbe_ring_feature *f; |
3061 | f = &adapter->ring_feature[RING_F_FCOE]; | 3098 | f = &adapter->ring_feature[RING_F_FCOE]; |
3062 | if ((i >= f->mask) && (i < f->mask + f->indices)) { | 3099 | if ((i >= f->mask) && (i < f->mask + f->indices)) |
3063 | clear_ring_ps_enabled(rx_ring); | 3100 | set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state); |
3064 | if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) | ||
3065 | rx_ring->rx_buf_len = | ||
3066 | IXGBE_FCOE_JUMBO_FRAME_SIZE; | ||
3067 | } else if (!ring_is_rsc_enabled(rx_ring) && | ||
3068 | !ring_is_ps_enabled(rx_ring)) { | ||
3069 | rx_ring->rx_buf_len = | ||
3070 | IXGBE_FCOE_JUMBO_FRAME_SIZE; | ||
3071 | } | ||
3072 | } | 3101 | } |
3073 | #endif /* IXGBE_FCOE */ | 3102 | #endif /* IXGBE_FCOE */ |
3074 | } | 3103 | } |
@@ -3342,6 +3371,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) | |||
3342 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); | 3371 | fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); |
3343 | 3372 | ||
3344 | /* set all bits that we expect to always be set */ | 3373 | /* set all bits that we expect to always be set */ |
3374 | fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ | ||
3345 | fctrl |= IXGBE_FCTRL_BAM; | 3375 | fctrl |= IXGBE_FCTRL_BAM; |
3346 | fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ | 3376 | fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ |
3347 | fctrl |= IXGBE_FCTRL_PMCF; | 3377 | fctrl |= IXGBE_FCTRL_PMCF; |
@@ -3390,6 +3420,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev) | |||
3390 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); | 3420 | IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); |
3391 | } | 3421 | } |
3392 | 3422 | ||
3423 | /* This is useful for sniffing bad packets. */ | ||
3424 | if (adapter->netdev->features & NETIF_F_RXALL) { | ||
3425 | /* UPE and MPE will be handled by normal PROMISC logic | ||
3426 | * in ixgbe_set_rx_mode */ | ||
3427 | fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ | ||
3428 | IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ | ||
3429 | IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ | ||
3430 | |||
3431 | fctrl &= ~(IXGBE_FCTRL_DPF); | ||
3432 | /* NOTE: VLAN filtering is disabled by setting PROMISC */ | ||
3433 | } | ||
3434 | |||
3393 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); | 3435 | IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); |
3394 | 3436 | ||
3395 | if (netdev->features & NETIF_F_HW_VLAN_RX) | 3437 | if (netdev->features & NETIF_F_HW_VLAN_RX) |
@@ -3977,6 +4019,27 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) | |||
3977 | } | 4019 | } |
3978 | 4020 | ||
3979 | /** | 4021 | /** |
4022 | * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers | ||
4023 | * @rx_ring: ring to setup | ||
4024 | * | ||
4025 | * On many IA platforms the L1 cache has a critical stride of 4K; this | ||
4026 | * results in each receive buffer starting in the same cache set. To help | ||
4027 | * reduce the pressure on this cache set we can interleave the offsets so | ||
4028 | * that only every other buffer will be in the same cache set. | ||
4029 | **/ | ||
4030 | static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring) | ||
4031 | { | ||
4032 | struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info; | ||
4033 | u16 i; | ||
4034 | |||
4035 | for (i = 0; i < rx_ring->count; i += 2) { | ||
4036 | rx_buffer[0].page_offset = 0; | ||
4037 | rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring); | ||
4038 | rx_buffer = &rx_buffer[2]; | ||
4039 | } | ||
4040 | } | ||
4041 | |||
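The loop above leaves even ring entries at offset 0 and odd entries at the buffer size, so adjacent buffers fall into different 4 KB-strided cache sets. A standalone sketch of the resulting pattern, assuming a 2 KB buffer size:

	#include <assert.h>

	int main(void)
	{
		unsigned int bufsz = 2048;      /* assumed ixgbe_rx_bufsz() value */
		unsigned int offset[8];
		unsigned int i;

		for (i = 0; i < 8; i += 2) {
			offset[i] = 0;          /* even entry: low half of the page */
			offset[i + 1] = bufsz;  /* odd entry: high half of the page */
		}

		for (i = 0; i < 8; i++)
			assert(offset[i] == ((i & 1) ? bufsz : 0));

		return 0;
	}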
4042 | /** | ||
3980 | * ixgbe_clean_rx_ring - Free Rx Buffers per Queue | 4043 | * ixgbe_clean_rx_ring - Free Rx Buffers per Queue |
3981 | * @rx_ring: ring to free buffers from | 4044 | * @rx_ring: ring to free buffers from |
3982 | **/ | 4045 | **/ |
@@ -3992,49 +4055,40 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) | |||
3992 | 4055 | ||
3993 | /* Free all the Rx ring sk_buffs */ | 4056 | /* Free all the Rx ring sk_buffs */ |
3994 | for (i = 0; i < rx_ring->count; i++) { | 4057 | for (i = 0; i < rx_ring->count; i++) { |
3995 | struct ixgbe_rx_buffer *rx_buffer_info; | 4058 | struct ixgbe_rx_buffer *rx_buffer; |
3996 | 4059 | ||
3997 | rx_buffer_info = &rx_ring->rx_buffer_info[i]; | 4060 | rx_buffer = &rx_ring->rx_buffer_info[i]; |
3998 | if (rx_buffer_info->dma) { | 4061 | if (rx_buffer->skb) { |
3999 | dma_unmap_single(rx_ring->dev, rx_buffer_info->dma, | 4062 | struct sk_buff *skb = rx_buffer->skb; |
4000 | rx_ring->rx_buf_len, | 4063 | if (IXGBE_CB(skb)->page_released) { |
4001 | DMA_FROM_DEVICE); | 4064 | dma_unmap_page(dev, |
4002 | rx_buffer_info->dma = 0; | 4065 | IXGBE_CB(skb)->dma, |
4003 | } | 4066 | ixgbe_rx_bufsz(rx_ring), |
4004 | if (rx_buffer_info->skb) { | 4067 | DMA_FROM_DEVICE); |
4005 | struct sk_buff *skb = rx_buffer_info->skb; | 4068 | IXGBE_CB(skb)->page_released = false; |
4006 | rx_buffer_info->skb = NULL; | ||
4007 | /* We need to clean up RSC frag lists */ | ||
4008 | skb = ixgbe_merge_active_tail(skb); | ||
4009 | ixgbe_close_active_frag_list(skb); | ||
4010 | if (IXGBE_CB(skb)->delay_unmap) { | ||
4011 | dma_unmap_single(dev, | ||
4012 | IXGBE_CB(skb)->dma, | ||
4013 | rx_ring->rx_buf_len, | ||
4014 | DMA_FROM_DEVICE); | ||
4015 | IXGBE_CB(skb)->dma = 0; | ||
4016 | IXGBE_CB(skb)->delay_unmap = false; | ||
4017 | } | 4069 | } |
4018 | dev_kfree_skb(skb); | 4070 | dev_kfree_skb(skb); |
4019 | } | 4071 | } |
4020 | if (!rx_buffer_info->page) | 4072 | rx_buffer->skb = NULL; |
4021 | continue; | 4073 | if (rx_buffer->dma) |
4022 | if (rx_buffer_info->page_dma) { | 4074 | dma_unmap_page(dev, rx_buffer->dma, |
4023 | dma_unmap_page(dev, rx_buffer_info->page_dma, | 4075 | ixgbe_rx_pg_size(rx_ring), |
4024 | PAGE_SIZE / 2, DMA_FROM_DEVICE); | 4076 | DMA_FROM_DEVICE); |
4025 | rx_buffer_info->page_dma = 0; | 4077 | rx_buffer->dma = 0; |
4026 | } | 4078 | if (rx_buffer->page) |
4027 | put_page(rx_buffer_info->page); | 4079 | put_page(rx_buffer->page); |
4028 | rx_buffer_info->page = NULL; | 4080 | rx_buffer->page = NULL; |
4029 | rx_buffer_info->page_offset = 0; | ||
4030 | } | 4081 | } |
4031 | 4082 | ||
4032 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; | 4083 | size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; |
4033 | memset(rx_ring->rx_buffer_info, 0, size); | 4084 | memset(rx_ring->rx_buffer_info, 0, size); |
4034 | 4085 | ||
4086 | ixgbe_init_rx_page_offset(rx_ring); | ||
4087 | |||
4035 | /* Zero out the descriptor ring */ | 4088 | /* Zero out the descriptor ring */ |
4036 | memset(rx_ring->desc, 0, rx_ring->size); | 4089 | memset(rx_ring->desc, 0, rx_ring->size); |
4037 | 4090 | ||
4091 | rx_ring->next_to_alloc = 0; | ||
4038 | rx_ring->next_to_clean = 0; | 4092 | rx_ring->next_to_clean = 0; |
4039 | rx_ring->next_to_use = 0; | 4093 | rx_ring->next_to_use = 0; |
4040 | } | 4094 | } |
@@ -5398,6 +5452,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) | |||
5398 | rx_ring->next_to_clean = 0; | 5452 | rx_ring->next_to_clean = 0; |
5399 | rx_ring->next_to_use = 0; | 5453 | rx_ring->next_to_use = 0; |
5400 | 5454 | ||
5455 | ixgbe_init_rx_page_offset(rx_ring); | ||
5456 | |||
5401 | return 0; | 5457 | return 0; |
5402 | err: | 5458 | err: |
5403 | vfree(rx_ring->rx_buffer_info); | 5459 | vfree(rx_ring->rx_buffer_info); |
@@ -5517,20 +5573,24 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) | |||
5517 | static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) | 5573 | static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) |
5518 | { | 5574 | { |
5519 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 5575 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
5520 | struct ixgbe_hw *hw = &adapter->hw; | ||
5521 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; | 5576 | int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; |
5522 | 5577 | ||
5523 | /* MTU < 68 is an error and causes problems on some kernels */ | 5578 | /* MTU < 68 is an error and causes problems on some kernels */ |
5524 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED && | 5579 | if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) |
5525 | hw->mac.type != ixgbe_mac_X540) { | 5580 | return -EINVAL; |
5526 | if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) | 5581 | |
5527 | return -EINVAL; | 5582 | /* |
5528 | } else { | 5583 | * For 82599EB we cannot allow PF to change MTU greater than 1500 |
5529 | if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) | 5584 | * in SR-IOV mode as it may cause buffer overruns in guest VFs that |
5585 | * don't allocate and chain buffers correctly. | ||
5586 | */ | ||
5587 | if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && | ||
5588 | (adapter->hw.mac.type == ixgbe_mac_82599EB) && | ||
5589 | (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) | ||
5530 | return -EINVAL; | 5590 | return -EINVAL; |
5531 | } | ||
5532 | 5591 | ||
5533 | e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); | 5592 | e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); |
5593 | |||
5534 | /* must set new MTU before calling down or up */ | 5594 | /* must set new MTU before calling down or up */ |
5535 | netdev->mtu = new_mtu; | 5595 | netdev->mtu = new_mtu; |
5536 | 5596 | ||
@@ -6523,9 +6583,11 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, | |||
6523 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); | 6583 | context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); |
6524 | } | 6584 | } |
6525 | 6585 | ||
6526 | static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, | 6586 | static int ixgbe_tso(struct ixgbe_ring *tx_ring, |
6587 | struct ixgbe_tx_buffer *first, | ||
6527 | u32 tx_flags, __be16 protocol, u8 *hdr_len) | 6588 | u32 tx_flags, __be16 protocol, u8 *hdr_len) |
6528 | { | 6589 | { |
6590 | struct sk_buff *skb = first->skb; | ||
6529 | int err; | 6591 | int err; |
6530 | u32 vlan_macip_lens, type_tucmd; | 6592 | u32 vlan_macip_lens, type_tucmd; |
6531 | u32 mss_l4len_idx, l4len; | 6593 | u32 mss_l4len_idx, l4len; |
@@ -6559,9 +6621,14 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, | |||
6559 | 0, IPPROTO_TCP, 0); | 6621 | 0, IPPROTO_TCP, 0); |
6560 | } | 6622 | } |
6561 | 6623 | ||
6624 | /* compute header lengths */ | ||
6562 | l4len = tcp_hdrlen(skb); | 6625 | l4len = tcp_hdrlen(skb); |
6563 | *hdr_len = skb_transport_offset(skb) + l4len; | 6626 | *hdr_len = skb_transport_offset(skb) + l4len; |
6564 | 6627 | ||
6628 | /* update gso size and bytecount with header size */ | ||
6629 | first->gso_segs = skb_shinfo(skb)->gso_segs; | ||
6630 | first->bytecount += (first->gso_segs - 1) * *hdr_len; | ||
6631 | |||
6565 | /* mss_l4len_id: use 1 as index for TSO */ | 6632 | /* mss_l4len_id: use 1 as index for TSO */ |
6566 | mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; | 6633 | mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; |
6567 | mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; | 6634 | mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; |
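The gso_segs/bytecount update above accounts for the headers the hardware replicates in front of every segment after the first. A worked example with hypothetical numbers (66-byte headers, 1448-byte MSS, 7200 bytes of payload):

	#include <assert.h>

	int main(void)
	{
		unsigned int hdr_len = 66;                      /* L2+L3+L4 header bytes */
		unsigned int mss = 1448;
		unsigned int payload = 7200;
		unsigned int skb_len = hdr_len + payload;       /* 7266 bytes in the skb */
		unsigned int gso_segs = (payload + mss - 1) / mss;          /* 5 segments */
		unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

		/* five wire frames, each carrying its own copy of the headers */
		assert(gso_segs == 5);
		assert(bytecount == payload + gso_segs * hdr_len);          /* 7530 bytes */
		return 0;
	}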
@@ -6579,9 +6646,10 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb, | |||
6579 | } | 6646 | } |
6580 | 6647 | ||
6581 | static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, | 6648 | static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, |
6582 | struct sk_buff *skb, u32 tx_flags, | 6649 | struct ixgbe_tx_buffer *first, |
6583 | __be16 protocol) | 6650 | u32 tx_flags, __be16 protocol) |
6584 | { | 6651 | { |
6652 | struct sk_buff *skb = first->skb; | ||
6585 | u32 vlan_macip_lens = 0; | 6653 | u32 vlan_macip_lens = 0; |
6586 | u32 mss_l4len_idx = 0; | 6654 | u32 mss_l4len_idx = 0; |
6587 | u32 type_tucmd = 0; | 6655 | u32 type_tucmd = 0; |
@@ -6658,7 +6726,7 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) | |||
6658 | 6726 | ||
6659 | /* set segmentation enable bits for TSO/FSO */ | 6727 | /* set segmentation enable bits for TSO/FSO */ |
6660 | #ifdef IXGBE_FCOE | 6728 | #ifdef IXGBE_FCOE |
6661 | if ((tx_flags & IXGBE_TX_FLAGS_TSO) || (tx_flags & IXGBE_TX_FLAGS_FSO)) | 6729 | if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO)) |
6662 | #else | 6730 | #else |
6663 | if (tx_flags & IXGBE_TX_FLAGS_TSO) | 6731 | if (tx_flags & IXGBE_TX_FLAGS_TSO) |
6664 | #endif | 6732 | #endif |
@@ -6667,201 +6735,193 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) | |||
6667 | return cmd_type; | 6735 | return cmd_type; |
6668 | } | 6736 | } |
6669 | 6737 | ||
6670 | static __le32 ixgbe_tx_olinfo_status(u32 tx_flags, unsigned int paylen) | 6738 | static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, |
6739 | u32 tx_flags, unsigned int paylen) | ||
6671 | { | 6740 | { |
6672 | __le32 olinfo_status = | 6741 | __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); |
6673 | cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT); | ||
6674 | |||
6675 | if (tx_flags & IXGBE_TX_FLAGS_TSO) { | ||
6676 | olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM | | ||
6677 | (1 << IXGBE_ADVTXD_IDX_SHIFT)); | ||
6678 | /* enable IPv4 checksum for TSO */ | ||
6679 | if (tx_flags & IXGBE_TX_FLAGS_IPV4) | ||
6680 | olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); | ||
6681 | } | ||
6682 | 6742 | ||
6683 | /* enable L4 checksum for TSO and TX checksum offload */ | 6743 | /* enable L4 checksum for TSO and TX checksum offload */ |
6684 | if (tx_flags & IXGBE_TX_FLAGS_CSUM) | 6744 | if (tx_flags & IXGBE_TX_FLAGS_CSUM) |
6685 | olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); | 6745 | olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM); |
6686 | 6746 | ||
6687 | #ifdef IXGBE_FCOE | 6747 | /* enable IPv4 checksum for TSO */ |
6688 | /* use index 1 context for FCOE/FSO */ | 6748 | if (tx_flags & IXGBE_TX_FLAGS_IPV4) |
6689 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) | 6749 | olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); |
6690 | olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC | | ||
6691 | (1 << IXGBE_ADVTXD_IDX_SHIFT)); | ||
6692 | 6750 | ||
6751 | /* use index 1 context for TSO/FSO/FCOE */ | ||
6752 | #ifdef IXGBE_FCOE | ||
6753 | if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE)) | ||
6754 | #else | ||
6755 | if (tx_flags & IXGBE_TX_FLAGS_TSO) | ||
6693 | #endif | 6756 | #endif |
6757 | olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); | ||
6758 | |||
6694 | /* | 6759 | /* |
6695 | * Check Context must be set if Tx switch is enabled, which it | 6760 | * Check Context must be set if Tx switch is enabled, which it |
6696 | * always is for case where virtual functions are running | 6761 | * always is for case where virtual functions are running |
6697 | */ | 6762 | */ |
6763 | #ifdef IXGBE_FCOE | ||
6764 | if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE)) | ||
6765 | #else | ||
6698 | if (tx_flags & IXGBE_TX_FLAGS_TXSW) | 6766 | if (tx_flags & IXGBE_TX_FLAGS_TXSW) |
6767 | #endif | ||
6699 | olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); | 6768 | olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC); |
6700 | 6769 | ||
6701 | return olinfo_status; | 6770 | tx_desc->read.olinfo_status = olinfo_status; |
6702 | } | 6771 | } |
6703 | 6772 | ||
6704 | #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ | 6773 | #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ |
6705 | IXGBE_TXD_CMD_RS) | 6774 | IXGBE_TXD_CMD_RS) |
6706 | 6775 | ||
6707 | static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, | 6776 | static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, |
6708 | struct sk_buff *skb, | ||
6709 | struct ixgbe_tx_buffer *first, | 6777 | struct ixgbe_tx_buffer *first, |
6710 | u32 tx_flags, | 6778 | u32 tx_flags, |
6711 | const u8 hdr_len) | 6779 | const u8 hdr_len) |
6712 | { | 6780 | { |
6713 | struct device *dev = tx_ring->dev; | ||
6714 | struct ixgbe_tx_buffer *tx_buffer_info; | ||
6715 | union ixgbe_adv_tx_desc *tx_desc; | ||
6716 | dma_addr_t dma; | 6781 | dma_addr_t dma; |
6717 | __le32 cmd_type, olinfo_status; | 6782 | struct sk_buff *skb = first->skb; |
6718 | struct skb_frag_struct *frag; | 6783 | struct ixgbe_tx_buffer *tx_buffer; |
6719 | unsigned int f = 0; | 6784 | union ixgbe_adv_tx_desc *tx_desc; |
6785 | struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; | ||
6720 | unsigned int data_len = skb->data_len; | 6786 | unsigned int data_len = skb->data_len; |
6721 | unsigned int size = skb_headlen(skb); | 6787 | unsigned int size = skb_headlen(skb); |
6722 | u32 offset = 0; | 6788 | unsigned int paylen = skb->len - hdr_len; |
6723 | u32 paylen = skb->len - hdr_len; | 6789 | __le32 cmd_type; |
6724 | u16 i = tx_ring->next_to_use; | 6790 | u16 i = tx_ring->next_to_use; |
6725 | u16 gso_segs; | 6791 | |
6792 | tx_desc = IXGBE_TX_DESC(tx_ring, i); | ||
6793 | |||
6794 | ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen); | ||
6795 | cmd_type = ixgbe_tx_cmd_type(tx_flags); | ||
6726 | 6796 | ||
6727 | #ifdef IXGBE_FCOE | 6797 | #ifdef IXGBE_FCOE |
6728 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { | 6798 | if (tx_flags & IXGBE_TX_FLAGS_FCOE) { |
6729 | if (data_len >= sizeof(struct fcoe_crc_eof)) { | 6799 | if (data_len < sizeof(struct fcoe_crc_eof)) { |
6730 | data_len -= sizeof(struct fcoe_crc_eof); | ||
6731 | } else { | ||
6732 | size -= sizeof(struct fcoe_crc_eof) - data_len; | 6800 | size -= sizeof(struct fcoe_crc_eof) - data_len; |
6733 | data_len = 0; | 6801 | data_len = 0; |
6802 | } else { | ||
6803 | data_len -= sizeof(struct fcoe_crc_eof); | ||
6734 | } | 6804 | } |
6735 | } | 6805 | } |
6736 | 6806 | ||
6737 | #endif | 6807 | #endif |
6738 | dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); | 6808 | dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); |
6739 | if (dma_mapping_error(dev, dma)) | 6809 | if (dma_mapping_error(tx_ring->dev, dma)) |
6740 | goto dma_error; | 6810 | goto dma_error; |
6741 | 6811 | ||
6742 | cmd_type = ixgbe_tx_cmd_type(tx_flags); | 6812 | /* record length, and DMA address */ |
6743 | olinfo_status = ixgbe_tx_olinfo_status(tx_flags, paylen); | 6813 | dma_unmap_len_set(first, len, size); |
6814 | dma_unmap_addr_set(first, dma, dma); | ||
6815 | first->tx_flags = tx_flags; | ||
6744 | 6816 | ||
6745 | tx_desc = IXGBE_TX_DESC(tx_ring, i); | 6817 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
6746 | 6818 | ||
6747 | for (;;) { | 6819 | for (;;) { |
6748 | while (size > IXGBE_MAX_DATA_PER_TXD) { | 6820 | while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) { |
6749 | tx_desc->read.buffer_addr = cpu_to_le64(dma + offset); | ||
6750 | tx_desc->read.cmd_type_len = | 6821 | tx_desc->read.cmd_type_len = |
6751 | cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); | 6822 | cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD); |
6752 | tx_desc->read.olinfo_status = olinfo_status; | ||
6753 | |||
6754 | offset += IXGBE_MAX_DATA_PER_TXD; | ||
6755 | size -= IXGBE_MAX_DATA_PER_TXD; | ||
6756 | 6823 | ||
6757 | tx_desc++; | ||
6758 | i++; | 6824 | i++; |
6825 | tx_desc++; | ||
6759 | if (i == tx_ring->count) { | 6826 | if (i == tx_ring->count) { |
6760 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); | 6827 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); |
6761 | i = 0; | 6828 | i = 0; |
6762 | } | 6829 | } |
6830 | |||
6831 | dma += IXGBE_MAX_DATA_PER_TXD; | ||
6832 | size -= IXGBE_MAX_DATA_PER_TXD; | ||
6833 | |||
6834 | tx_desc->read.buffer_addr = cpu_to_le64(dma); | ||
6835 | tx_desc->read.olinfo_status = 0; | ||
6763 | } | 6836 | } |
6764 | 6837 | ||
6765 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 6838 | if (likely(!data_len)) |
6766 | tx_buffer_info->length = offset + size; | 6839 | break; |
6767 | tx_buffer_info->tx_flags = tx_flags; | ||
6768 | tx_buffer_info->dma = dma; | ||
6769 | 6840 | ||
6770 | tx_desc->read.buffer_addr = cpu_to_le64(dma + offset); | 6841 | if (unlikely(skb->no_fcs)) |
6842 | cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS)); | ||
6771 | tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); | 6843 | tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); |
6772 | tx_desc->read.olinfo_status = olinfo_status; | ||
6773 | 6844 | ||
6774 | if (!data_len) | 6845 | i++; |
6775 | break; | 6846 | tx_desc++; |
6847 | if (i == tx_ring->count) { | ||
6848 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); | ||
6849 | i = 0; | ||
6850 | } | ||
6776 | 6851 | ||
6777 | frag = &skb_shinfo(skb)->frags[f]; | ||
6778 | #ifdef IXGBE_FCOE | 6852 | #ifdef IXGBE_FCOE |
6779 | size = min_t(unsigned int, data_len, skb_frag_size(frag)); | 6853 | size = min_t(unsigned int, data_len, skb_frag_size(frag)); |
6780 | #else | 6854 | #else |
6781 | size = skb_frag_size(frag); | 6855 | size = skb_frag_size(frag); |
6782 | #endif | 6856 | #endif |
6783 | data_len -= size; | 6857 | data_len -= size; |
6784 | f++; | ||
6785 | 6858 | ||
6786 | offset = 0; | 6859 | dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, |
6787 | tx_flags |= IXGBE_TX_FLAGS_MAPPED_AS_PAGE; | 6860 | DMA_TO_DEVICE); |
6788 | 6861 | if (dma_mapping_error(tx_ring->dev, dma)) | |
6789 | dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); | ||
6790 | if (dma_mapping_error(dev, dma)) | ||
6791 | goto dma_error; | 6862 | goto dma_error; |
6792 | 6863 | ||
6793 | tx_desc++; | 6864 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
6794 | i++; | 6865 | dma_unmap_len_set(tx_buffer, len, size); |
6795 | if (i == tx_ring->count) { | 6866 | dma_unmap_addr_set(tx_buffer, dma, dma); |
6796 | tx_desc = IXGBE_TX_DESC(tx_ring, 0); | ||
6797 | i = 0; | ||
6798 | } | ||
6799 | } | ||
6800 | |||
6801 | tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD); | ||
6802 | |||
6803 | i++; | ||
6804 | if (i == tx_ring->count) | ||
6805 | i = 0; | ||
6806 | 6867 | ||
6807 | tx_ring->next_to_use = i; | 6868 | tx_desc->read.buffer_addr = cpu_to_le64(dma); |
6869 | tx_desc->read.olinfo_status = 0; | ||
6808 | 6870 | ||
6809 | if (tx_flags & IXGBE_TX_FLAGS_TSO) | 6871 | frag++; |
6810 | gso_segs = skb_shinfo(skb)->gso_segs; | 6872 | } |
6811 | #ifdef IXGBE_FCOE | ||
6812 | /* adjust for FCoE Sequence Offload */ | ||
6813 | else if (tx_flags & IXGBE_TX_FLAGS_FSO) | ||
6814 | gso_segs = DIV_ROUND_UP(skb->len - hdr_len, | ||
6815 | skb_shinfo(skb)->gso_size); | ||
6816 | #endif /* IXGBE_FCOE */ | ||
6817 | else | ||
6818 | gso_segs = 1; | ||
6819 | 6873 | ||
6820 | /* multiply data chunks by size of headers */ | 6874 | /* write last descriptor with RS and EOP bits */ |
6821 | tx_buffer_info->bytecount = paylen + (gso_segs * hdr_len); | 6875 | cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD); |
6822 | tx_buffer_info->gso_segs = gso_segs; | 6876 | tx_desc->read.cmd_type_len = cmd_type; |
6823 | tx_buffer_info->skb = skb; | ||
6824 | 6877 | ||
6825 | netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer_info->bytecount); | 6878 | netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); |
6826 | 6879 | ||
6827 | /* set the timestamp */ | 6880 | /* set the timestamp */ |
6828 | first->time_stamp = jiffies; | 6881 | first->time_stamp = jiffies; |
6829 | 6882 | ||
6830 | /* | 6883 | /* |
6831 | * Force memory writes to complete before letting h/w | 6884 | * Force memory writes to complete before letting h/w know there |
6832 | * know there are new descriptors to fetch. (Only | 6885 | * are new descriptors to fetch. (Only applicable for weak-ordered |
6833 | * applicable for weak-ordered memory model archs, | 6886 | * memory model archs, such as IA-64). |
6834 | * such as IA-64). | 6887 | * |
6888 | * We also need this memory barrier to make certain all of the | ||
6889 | * status bits have been updated before next_to_watch is written. | ||
6835 | */ | 6890 | */ |
6836 | wmb(); | 6891 | wmb(); |
6837 | 6892 | ||
6838 | /* set next_to_watch value indicating a packet is present */ | 6893 | /* set next_to_watch value indicating a packet is present */ |
6839 | first->next_to_watch = tx_desc; | 6894 | first->next_to_watch = tx_desc; |
6840 | 6895 | ||
6896 | i++; | ||
6897 | if (i == tx_ring->count) | ||
6898 | i = 0; | ||
6899 | |||
6900 | tx_ring->next_to_use = i; | ||
6901 | |||
6841 | /* notify HW of packet */ | 6902 | /* notify HW of packet */ |
6842 | writel(i, tx_ring->tail); | 6903 | writel(i, tx_ring->tail); |
6843 | 6904 | ||
6844 | return; | 6905 | return; |
6845 | dma_error: | 6906 | dma_error: |
6846 | dev_err(dev, "TX DMA map failed\n"); | 6907 | dev_err(tx_ring->dev, "TX DMA map failed\n"); |
6847 | 6908 | ||
6848 | /* clear dma mappings for failed tx_buffer_info map */ | 6909 | /* clear dma mappings for failed tx_buffer_info map */ |
6849 | for (;;) { | 6910 | for (;;) { |
6850 | tx_buffer_info = &tx_ring->tx_buffer_info[i]; | 6911 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
6851 | ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info); | 6912 | ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); |
6852 | if (tx_buffer_info == first) | 6913 | if (tx_buffer == first) |
6853 | break; | 6914 | break; |
6854 | if (i == 0) | 6915 | if (i == 0) |
6855 | i = tx_ring->count; | 6916 | i = tx_ring->count; |
6856 | i--; | 6917 | i--; |
6857 | } | 6918 | } |
6858 | 6919 | ||
6859 | dev_kfree_skb_any(skb); | ||
6860 | |||
6861 | tx_ring->next_to_use = i; | 6920 | tx_ring->next_to_use = i; |
6862 | } | 6921 | } |
6863 | 6922 | ||
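The inner while loop in ixgbe_tx_map above splits any buffer larger than IXGBE_MAX_DATA_PER_TXD across several descriptors. A rough sketch of the descriptor count that produces, assuming a 16 KB per-descriptor limit (an assumption here; the real constant is defined in ixgbe.h):

	#include <assert.h>

	int main(void)
	{
		unsigned int max_per_txd = 16384;   /* assumed IXGBE_MAX_DATA_PER_TXD */
		unsigned int size = 40000;          /* one large buffer to map */
		unsigned int descs = 0;

		while (size > max_per_txd) {
			size -= max_per_txd;
			descs++;                    /* full-sized descriptor */
		}
		descs++;                            /* final descriptor for the remainder */

		assert(descs == 3);                 /* 16384 + 16384 + 7232 bytes */
		return 0;
	}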
6864 | static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, | 6923 | static void ixgbe_atr(struct ixgbe_ring *ring, |
6924 | struct ixgbe_tx_buffer *first, | ||
6865 | u32 tx_flags, __be16 protocol) | 6925 | u32 tx_flags, __be16 protocol) |
6866 | { | 6926 | { |
6867 | struct ixgbe_q_vector *q_vector = ring->q_vector; | 6927 | struct ixgbe_q_vector *q_vector = ring->q_vector; |
@@ -6886,7 +6946,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, | |||
6886 | ring->atr_count++; | 6946 | ring->atr_count++; |
6887 | 6947 | ||
6888 | /* snag network header to get L4 type and address */ | 6948 | /* snag network header to get L4 type and address */ |
6889 | hdr.network = skb_network_header(skb); | 6949 | hdr.network = skb_network_header(first->skb); |
6890 | 6950 | ||
6891 | /* Currently only IPv4/IPv6 with TCP is supported */ | 6951 | /* Currently only IPv4/IPv6 with TCP is supported */ |
6892 | if ((protocol != __constant_htons(ETH_P_IPV6) || | 6952 | if ((protocol != __constant_htons(ETH_P_IPV6) || |
@@ -6895,7 +6955,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb, | |||
6895 | hdr.ipv4->protocol != IPPROTO_TCP)) | 6955 | hdr.ipv4->protocol != IPPROTO_TCP)) |
6896 | return; | 6956 | return; |
6897 | 6957 | ||
6898 | th = tcp_hdr(skb); | 6958 | th = tcp_hdr(first->skb); |
6899 | 6959 | ||
6900 | /* skip this packet since it is invalid or the socket is closing */ | 6960 | /* skip this packet since it is invalid or the socket is closing */ |
6901 | if (!th || th->fin) | 6961 | if (!th || th->fin) |
@@ -7033,6 +7093,12 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
7033 | return NETDEV_TX_BUSY; | 7093 | return NETDEV_TX_BUSY; |
7034 | } | 7094 | } |
7035 | 7095 | ||
7096 | /* record the location of the first descriptor for this packet */ | ||
7097 | first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; | ||
7098 | first->skb = skb; | ||
7099 | first->bytecount = skb->len; | ||
7100 | first->gso_segs = 1; | ||
7101 | |||
7036 | /* if we have a HW VLAN tag being added default to the HW one */ | 7102 | /* if we have a HW VLAN tag being added default to the HW one */ |
7037 | if (vlan_tx_tag_present(skb)) { | 7103 | if (vlan_tx_tag_present(skb)) { |
7038 | tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; | 7104 | tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; |
@@ -7079,14 +7145,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
7079 | } | 7145 | } |
7080 | } | 7146 | } |
7081 | 7147 | ||
7082 | /* record the location of the first descriptor for this packet */ | ||
7083 | first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; | ||
7084 | |||
7085 | #ifdef IXGBE_FCOE | 7148 | #ifdef IXGBE_FCOE |
7086 | /* setup tx offload for FCoE */ | 7149 | /* setup tx offload for FCoE */ |
7087 | if ((protocol == __constant_htons(ETH_P_FCOE)) && | 7150 | if ((protocol == __constant_htons(ETH_P_FCOE)) && |
7088 | (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { | 7151 | (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { |
7089 | tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len); | 7152 | tso = ixgbe_fso(tx_ring, first, tx_flags, &hdr_len); |
7090 | if (tso < 0) | 7153 | if (tso < 0) |
7091 | goto out_drop; | 7154 | goto out_drop; |
7092 | else if (tso) | 7155 | else if (tso) |
@@ -7103,37 +7166,55 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, | |||
7103 | if (protocol == __constant_htons(ETH_P_IP)) | 7166 | if (protocol == __constant_htons(ETH_P_IP)) |
7104 | tx_flags |= IXGBE_TX_FLAGS_IPV4; | 7167 | tx_flags |= IXGBE_TX_FLAGS_IPV4; |
7105 | 7168 | ||
7106 | tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len); | 7169 | tso = ixgbe_tso(tx_ring, first, tx_flags, protocol, &hdr_len); |
7107 | if (tso < 0) | 7170 | if (tso < 0) |
7108 | goto out_drop; | 7171 | goto out_drop; |
7109 | else if (tso) | 7172 | else if (tso) |
7110 | tx_flags |= IXGBE_TX_FLAGS_TSO; | 7173 | tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; |
7111 | else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol)) | 7174 | else if (ixgbe_tx_csum(tx_ring, first, tx_flags, protocol)) |
7112 | tx_flags |= IXGBE_TX_FLAGS_CSUM; | 7175 | tx_flags |= IXGBE_TX_FLAGS_CSUM; |
7113 | 7176 | ||
7114 | /* add the ATR filter if ATR is on */ | 7177 | /* add the ATR filter if ATR is on */ |
7115 | if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) | 7178 | if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) |
7116 | ixgbe_atr(tx_ring, skb, tx_flags, protocol); | 7179 | ixgbe_atr(tx_ring, first, tx_flags, protocol); |
7117 | 7180 | ||
7118 | #ifdef IXGBE_FCOE | 7181 | #ifdef IXGBE_FCOE |
7119 | xmit_fcoe: | 7182 | xmit_fcoe: |
7120 | #endif /* IXGBE_FCOE */ | 7183 | #endif /* IXGBE_FCOE */ |
7121 | ixgbe_tx_map(tx_ring, skb, first, tx_flags, hdr_len); | 7184 | ixgbe_tx_map(tx_ring, first, tx_flags, hdr_len); |
7122 | 7185 | ||
7123 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); | 7186 | ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); |
7124 | 7187 | ||
7125 | return NETDEV_TX_OK; | 7188 | return NETDEV_TX_OK; |
7126 | 7189 | ||
7127 | out_drop: | 7190 | out_drop: |
7128 | dev_kfree_skb_any(skb); | 7191 | dev_kfree_skb_any(first->skb); |
7192 | first->skb = NULL; | ||
7193 | |||
7129 | return NETDEV_TX_OK; | 7194 | return NETDEV_TX_OK; |
7130 | } | 7195 | } |
7131 | 7196 | ||
7132 | static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | 7197 | static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, |
7198 | struct net_device *netdev) | ||
7133 | { | 7199 | { |
7134 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 7200 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
7135 | struct ixgbe_ring *tx_ring; | 7201 | struct ixgbe_ring *tx_ring; |
7136 | 7202 | ||
7203 | if (skb->len <= 0) { | ||
7204 | dev_kfree_skb_any(skb); | ||
7205 | return NETDEV_TX_OK; | ||
7206 | } | ||
7207 | |||
7208 | /* | ||
7209 | * The minimum packet size for olinfo paylen is 17 so pad the skb | ||
7210 | * in order to meet this minimum size requirement. | ||
7211 | */ | ||
7212 | if (skb->len < 17) { | ||
7213 | if (skb_padto(skb, 17)) | ||
7214 | return NETDEV_TX_OK; | ||
7215 | skb->len = 17; | ||
7216 | } | ||
7217 | |||
7137 | tx_ring = adapter->tx_ring[skb->queue_mapping]; | 7218 | tx_ring = adapter->tx_ring[skb->queue_mapping]; |
7138 | return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); | 7219 | return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); |
7139 | } | 7220 | } |
@@ -7457,6 +7538,7 @@ static int ixgbe_set_features(struct net_device *netdev, | |||
7457 | netdev_features_t data) | 7538 | netdev_features_t data) |
7458 | { | 7539 | { |
7459 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 7540 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
7541 | netdev_features_t changed = netdev->features ^ data; | ||
7460 | bool need_reset = false; | 7542 | bool need_reset = false; |
7461 | 7543 | ||
7462 | /* Make sure RSC matches LRO, reset if change */ | 7544 | /* Make sure RSC matches LRO, reset if change */ |
@@ -7493,6 +7575,10 @@ static int ixgbe_set_features(struct net_device *netdev, | |||
7493 | need_reset = true; | 7575 | need_reset = true; |
7494 | } | 7576 | } |
7495 | 7577 | ||
7578 | if (changed & NETIF_F_RXALL) | ||
7579 | need_reset = true; | ||
7580 | |||
7581 | netdev->features = data; | ||
7496 | if (need_reset) | 7582 | if (need_reset) |
7497 | ixgbe_do_reset(netdev); | 7583 | ixgbe_do_reset(netdev); |
7498 | 7584 | ||
@@ -7771,6 +7857,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7771 | break; | 7857 | break; |
7772 | } | 7858 | } |
7773 | 7859 | ||
7860 | netdev->hw_features |= NETIF_F_RXALL; | ||
7861 | |||
7774 | netdev->vlan_features |= NETIF_F_TSO; | 7862 | netdev->vlan_features |= NETIF_F_TSO; |
7775 | netdev->vlan_features |= NETIF_F_TSO6; | 7863 | netdev->vlan_features |= NETIF_F_TSO6; |
7776 | netdev->vlan_features |= NETIF_F_IP_CSUM; | 7864 | netdev->vlan_features |= NETIF_F_IP_CSUM; |
@@ -7778,6 +7866,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, | |||
7778 | netdev->vlan_features |= NETIF_F_SG; | 7866 | netdev->vlan_features |= NETIF_F_SG; |
7779 | 7867 | ||
7780 | netdev->priv_flags |= IFF_UNICAST_FLT; | 7868 | netdev->priv_flags |= IFF_UNICAST_FLT; |
7869 | netdev->priv_flags |= IFF_SUPP_NOFCS; | ||
7781 | 7870 | ||
7782 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | 7871 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
7783 | adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | | 7872 | adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | |