-rw-r--r--   drivers/net/igb/igb.h         | 12
-rw-r--r--   drivers/net/igb/igb_ethtool.c | 13
-rw-r--r--   drivers/net/igb/igb_main.c    | 93
3 files changed, 60 insertions(+), 58 deletions(-)
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 6a67fa2e6007..0c30c5e375c7 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -192,6 +192,8 @@ struct igb_ring {
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
+	u32 flags;
+
 	union {
 		/* TX */
 		struct {
@@ -206,6 +208,13 @@ struct igb_ring {
 	};
 };
 
+#define IGB_RING_FLAG_RX_CSUM          0x00000001 /* RX CSUM enabled */
+#define IGB_RING_FLAG_RX_SCTP_CSUM     0x00000002 /* SCTP CSUM offload enabled */
+
+#define IGB_RING_FLAG_TX_CTX_IDX       0x00000001 /* HW requires context index */
+
+#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
+
 #define E1000_RX_DESC_ADV(R, i) \
 	(&(((union e1000_adv_rx_desc *)((R).desc))[i]))
 #define E1000_TX_DESC_ADV(R, i) \
@@ -245,7 +254,6 @@ struct igb_adapter {
 	/* TX */
 	struct igb_ring *tx_ring;      /* One per active queue */
 	unsigned long tx_queue_len;
-	u32 txd_cmd;
 	u32 gotc;
 	u64 gotc_old;
 	u64 tpt_old;
@@ -303,8 +311,6 @@ struct igb_adapter {
 #define IGB_FLAG_HAS_MSI           (1 << 0)
 #define IGB_FLAG_DCA_ENABLED       (1 << 1)
 #define IGB_FLAG_QUAD_PORT_A       (1 << 2)
-#define IGB_FLAG_NEED_CTX_IDX      (1 << 3)
-#define IGB_FLAG_RX_CSUM_DISABLED  (1 << 4)
 
 enum e1000_state_t {
 	__IGB_TESTING,
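
Note on the new flag values: IGB_RING_FLAG_RX_CSUM and IGB_RING_FLAG_TX_CTX_IDX both occupy bit 0. That is safe because the RX_* bits are only ever tested on RX rings and TX_CTX_IDX only on TX rings, so the two namespaces never apply to the same ring. A minimal sketch of the intended test, using a hypothetical helper name that is not part of this patch:

	/* sketch only -- igb_tx_ring_needs_ctx_idx() is illustrative */
	static inline bool igb_tx_ring_needs_ctx_idx(const struct igb_ring *tx_ring)
	{
		/* bit 0 means "HW requires context index" on a TX ring;
		 * the same bit means "RX checksum enabled" on an RX ring */
		return !!(tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX);
	}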
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index f62430b1f759..c44dedec1265 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -279,17 +279,20 @@ static int igb_set_pauseparam(struct net_device *netdev,
 static u32 igb_get_rx_csum(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED);
+	return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
 }
 
 static int igb_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	int i;
 
-	if (data)
-		adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED;
-	else
-		adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		if (data)
+			adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
+		else
+			adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
+	}
 
 	return 0;
 }
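
The get hook reports rx_ring[0] as representative while the set hook writes every ring, keeping the RX_CSUM bit uniform across queues. The double negation is what makes the u32 return well-formed for any flag above bit 0; a sketch assuming the flag definitions from igb.h above (the helper name is hypothetical):

	/* sketch: '&' with IGB_RING_FLAG_RX_SCTP_CSUM yields 0x0 or 0x2;
	 * '!!' collapses that to the 0/1 that ethtool expects */
	static u32 igb_ring_flag_to_bool(const struct igb_ring *ring, u32 flag)
	{
		return !!(ring->flags & flag);
	}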
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index bdd7bf099363..00f3f2db2948 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -437,13 +437,21 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
+		/* For 82575, context index must be unique per ring. */
+		if (adapter->hw.mac.type == e1000_82575)
+			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
 	}
+
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct igb_ring *ring = &(adapter->rx_ring[i]);
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
 		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+		/* set flag indicating ring supports SCTP checksum offload */
+		if (adapter->hw.mac.type >= e1000_82576)
+			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
 	}
 
 	igb_cache_ring_register(adapter);
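
Taken together, the two loops leave the rings flagged per MAC type roughly as follows (a summary inferred from this hunk, not text from the patch):

	/*
	 * 82575:   TX rings: IGB_RING_FLAG_TX_CTX_IDX
	 *          RX rings: IGB_RING_FLAG_RX_CSUM
	 * 82576+:  TX rings: no flags
	 *          RX rings: IGB_RING_FLAG_RX_CSUM | IGB_RING_FLAG_RX_SCTP_CSUM
	 */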
@@ -1517,16 +1525,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 	igb_get_bus_info_pcie(hw);
 
-	/* set flags */
-	switch (hw->mac.type) {
-	case e1000_82575:
-		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
-		break;
-	case e1000_82576:
-	default:
-		break;
-	}
-
 	hw->phy.autoneg_wait_to_complete = false;
 	hw->mac.adaptive_ifs = true;
 
@@ -2149,9 +2147,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
-
-	/* Setup Transmit Descriptor Settings for eop descriptor */
-	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
 }
 
 /**
@@ -3272,8 +3267,7 @@ set_itr_now:
 #define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IGB_TX_FLAGS_VLAN_SHIFT	16
 
-static inline int igb_tso_adv(struct igb_adapter *adapter,
-			      struct igb_ring *tx_ring,
+static inline int igb_tso_adv(struct igb_ring *tx_ring,
 			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
@@ -3335,8 +3329,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
 
 	/* For 82575, context index must be unique per ring. */
-	if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
-		mss_l4len_idx |= tx_ring->queue_index << 4;
+	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 	context_desc->seqnum_seed = 0;
@@ -3353,9 +3347,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
 	return true;
 }
 
-static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
-				   struct igb_ring *tx_ring,
-				   struct sk_buff *skb, u32 tx_flags)
+static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
+				   struct sk_buff *skb, u32 tx_flags)
 {
 	struct e1000_adv_tx_context_desc *context_desc;
 	struct pci_dev *pdev = tx_ring->pdev;
@@ -3417,11 +3410,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
 		context_desc->seqnum_seed = 0;
-		if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
+		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
 			context_desc->mss_l4len_idx =
-				cpu_to_le32(tx_ring->queue_index << 4);
-		else
-			context_desc->mss_l4len_idx = 0;
+				cpu_to_le32(tx_ring->reg_idx << 4);
 
 		buffer_info->time_stamp = jiffies;
 		buffer_info->next_to_watch = i;
@@ -3492,8 +3483,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	return count + 1;
 }
 
-static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
-				    struct igb_ring *tx_ring,
+static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 				    int tx_flags, int count, u32 paylen,
 				    u8 hdr_len)
 {
@@ -3525,10 +3515,11 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
 	}
 
-	if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
-	    (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
+	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
+	    (tx_flags & (IGB_TX_FLAGS_CSUM |
+			 IGB_TX_FLAGS_TSO |
 			 IGB_TX_FLAGS_VLAN)))
-		olinfo_status |= tx_ring->queue_index << 4;
+		olinfo_status |= tx_ring->reg_idx << 4;
 
 	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
 
@@ -3545,7 +3536,7 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
 		i = 0;
 	}
 
-	tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
+	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
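
The removed adapter->txd_cmd field was only ever assigned E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS, so IGB_ADVTXD_DCMD trades a per-adapter load for a compile-time constant. Assuming the usual e1000 defines (EOP = 0x01000000, RS = 0x08000000), the write on the last descriptor folds to:

	/* sketch: IGB_ADVTXD_DCMD folds to 0x09000000 --
	 * end-of-packet + report-status on the final descriptor */
	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);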
@@ -3644,17 +3635,17 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		tx_flags |= IGB_TX_FLAGS_IPV4;
 
 	first = tx_ring->next_to_use;
-	tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
-					    &hdr_len) : 0;
-
-	if (tso < 0) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
+	if (skb_is_gso(skb)) {
+		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
+		if (tso < 0) {
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+	}
 
 	if (tso)
 		tx_flags |= IGB_TX_FLAGS_TSO;
-	else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
+	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
 		 (skb->ip_summed == CHECKSUM_PARTIAL))
 		tx_flags |= IGB_TX_FLAGS_CSUM;
 
@@ -3664,17 +3655,18 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	 */
 	count = igb_tx_map_adv(tx_ring, skb, first);
 
-	if (count) {
-		igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
-				 skb->len, hdr_len);
-		/* Make sure there is space in the ring for the next send. */
-		igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
-	} else {
+	if (!count) {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
 		tx_ring->next_to_use = first;
+		return NETDEV_TX_OK;
 	}
 
+	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
+
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+
 	return NETDEV_TX_OK;
 }
 
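
Restructuring the tail of the xmit path around early returns keeps the success path unindented; the failure leg also unwinds the ring so the reserved slot can be reused. The same lines as above, annotated for illustration (the comments are not part of the patch):

	if (!count) {
		dev_kfree_skb_any(skb);                     /* drop the frame */
		tx_ring->buffer_info[first].time_stamp = 0; /* avoid a false tx-hang detect */
		tx_ring->next_to_use = first;               /* rewind to the pre-map position */
		return NETDEV_TX_OK;
	}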
@@ -4800,15 +4792,15 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
 }
 
 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
-				       struct igb_adapter *adapter,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
-	if ((status_err & E1000_RXD_STAT_IXSM) ||
-	    (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED))
+	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+	    (status_err & E1000_RXD_STAT_IXSM))
 		return;
+
 	/* TCP/UDP checksum error bit is set */
 	if (status_err &
 	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4817,9 +4809,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
 		 * packets, (aka let the stack check the crc32c)
 		 */
-		if (!((adapter->hw.mac.type == e1000_82576) &&
-		      (skb->len == 60)))
+		if ((skb->len == 60) &&
+		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
 			ring->rx_stats.csum_err++;
+
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -4827,7 +4820,7 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err);
+	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
 }
 
 static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
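
With the adapter argument gone, igb_rx_checksum_adv() is driven entirely by ring state. Its decision ladder after this patch, as a summary comment (not patch text):

	/*
	 * 1. RX_CSUM clear on the ring, or IXSM set  -> leave CHECKSUM_NONE
	 * 2. TCPE/IPE set  -> bump csum_err only in the 60-byte SCTP
	 *    errata case, then let the stack verify the checksum
	 * 3. TCPCS/UDPCS set  -> CHECKSUM_UNNECESSARY
	 */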
@@ -4978,7 +4971,7 @@ send_up:
 		total_bytes += skb->len;
 		total_packets++;
 
-		igb_rx_checksum_adv(rx_ring, adapter, staterr, skb);
+		igb_rx_checksum_adv(rx_ring, staterr, skb);
 
 		skb->protocol = eth_type_trans(skb, netdev);
 		skb_record_rx_queue(skb, rx_ring->queue_index);