author     David S. Miller <davem@davemloft.net>  2012-11-15 15:18:02 -0500
committer  David S. Miller <davem@davemloft.net>  2012-11-15 15:18:02 -0500
commit     efad0c14b720ec3791c95e0658c3990fbc124366 (patch)
tree       00f3fc8854031882d016033a45c3bf34aedbf525
parent     1ff05fb7114a6b4118e0f7d89fed2659f7131b0a (diff)
parent     55fb277c1f6181d445f88ef8596b87eab61f14b6 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:
====================
This series contains updates to ioat (DCA) and ixgbevf.
...
Alexander Duyck (1):
ioat: Do not enable DCA if tag map is invalid
Greg Rose (8):
ixgbevf: Streamline the rx buffer allocation
ixgbevf: Fix unnecessary dereference where local var is available.
ixgbevf: Remove the ring adapter pointer value
ixgbevf: Remove checking for mac.ops function pointers
ixgbevf: Remove mailbox spinlock from the reset function
ixgbevf: White space and comments clean up
ixgbevf: Remove unneeded and obsolete comment
ixgbevf: Add checksum statistics counters to rings
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/dma/ioat/dca.c                             |  23
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h       |   3
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  | 123
3 files changed, 72 insertions, 77 deletions
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index abd9038e06b1..d6668071bd0d 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -604,6 +604,23 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
 	return slots;
 }
 
+static inline int dca3_tag_map_invalid(u8 *tag_map)
+{
+	/*
+	 * If the tag map is not programmed by the BIOS the default is:
+	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
+	 *
+	 * This an invalid map and will result in only 2 possible tags
+	 * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that
+	 * this entire definition is invalid.
+	 */
+	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
+		(tag_map[1] == DCA_TAG_MAP_VALID) &&
+		(tag_map[2] == DCA_TAG_MAP_VALID) &&
+		(tag_map[3] == DCA_TAG_MAP_VALID) &&
+		(tag_map[4] == DCA_TAG_MAP_VALID));
+}
+
 struct dca_provider * __devinit
 ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
@@ -674,6 +691,12 @@ ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
 	}
 
+	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
+		dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
+		free_dca_provider(dca);
+		return NULL;
+	}
+
 	err = register_dca_provider(dca, &pdev->dev);
 	if (err) {
 		free_dca_provider(dca);
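As an aside for readers who want to poke at the new check: below is a minimal standalone sketch of dca3_tag_map_invalid(). It assumes DCA_TAG_MAP_VALID is 0x80, the value the quoted BIOS default repeats in its first five entries; the "programmed" map is a made-up example, not taken from real hardware.

/* Standalone sketch of the tag-map validity check added above.
 * Assumption: DCA_TAG_MAP_VALID == 0x80, matching the BIOS
 * default pattern quoted in the kernel comment. */
#include <stdio.h>

#define DCA_TAG_MAP_VALID 0x80

static int dca3_tag_map_invalid(unsigned char *tag_map)
{
	/* all five leading entries still at the default => never programmed */
	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
		(tag_map[1] == DCA_TAG_MAP_VALID) &&
		(tag_map[2] == DCA_TAG_MAP_VALID) &&
		(tag_map[3] == DCA_TAG_MAP_VALID) &&
		(tag_map[4] == DCA_TAG_MAP_VALID));
}

int main(void)
{
	unsigned char bios_default[8] = { 0x80, 0x80, 0x80, 0x80,
					  0x80, 0x00, 0x00, 0x00 };
	unsigned char programmed[8]   = { 0x60, 0x61, 0x62, 0x63,
					  0x64, 0x00, 0x00, 0x00 }; /* hypothetical */

	printf("default map invalid:    %d\n", dca3_tag_map_invalid(bios_default)); /* 1 */
	printf("programmed map invalid: %d\n", dca3_tag_map_invalid(programmed));   /* 0 */
	return 0;
}

Compiled with any C compiler, it prints 1 for the untouched BIOS default and 0 for the hypothetical programmed map, which is exactly the distinction ioat3_dca_init() now uses to bail out before registering the provider.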
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 1211fa093437..fc0af9a3bb35 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,7 +58,6 @@ struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
 	struct net_device *netdev;
 	struct device *dev;
-	struct ixgbevf_adapter *adapter;  /* backlink */
 	void *desc;		/* descriptor ring memory */
 	dma_addr_t dma;		/* phys. address of descriptor ring */
 	unsigned int size;	/* length in bytes */
@@ -75,6 +74,8 @@ struct ixgbevf_ring {
 	u64 total_bytes;
 	u64 total_packets;
 	struct u64_stats_sync syncp;
+	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_good;
 
 	u16 head;
 	u16 tail;
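A note on the design here: moving hw_csum_rx_error/hw_csum_rx_good from the adapter into the ring means the Rx hot path only increments memory belonging to the ring it is already working on; the adapter-wide totals are folded together in ixgbevf_update_stats() (see the hunk near the end of ixgbevf_main.c below). A minimal sketch of that pattern, with simplified stand-in structs rather than the real driver types:

/* Sketch of the per-ring counter pattern; struct layouts and the
 * queue count are stand-ins, not the driver's own definitions. */
#include <stdio.h>

#define NUM_RX_QUEUES 2

struct ring {
	unsigned long long hw_csum_rx_error;
	unsigned long long hw_csum_rx_good;
};

struct adapter {
	struct ring rx_ring[NUM_RX_QUEUES];
	unsigned long long hw_csum_rx_error;
	unsigned long long hw_csum_rx_good;
};

/* hot path: each ring bumps only its own counter */
static void rx_good(struct ring *r) { r->hw_csum_rx_good++; }

/* slow path: fold ring counters into adapter totals and reset them,
 * mirroring the ixgbevf_update_stats() hunk later in this diff */
static void update_stats(struct adapter *a)
{
	int i;

	for (i = 0; i < NUM_RX_QUEUES; i++) {
		a->hw_csum_rx_error += a->rx_ring[i].hw_csum_rx_error;
		a->hw_csum_rx_good  += a->rx_ring[i].hw_csum_rx_good;
		a->rx_ring[i].hw_csum_rx_error = 0;
		a->rx_ring[i].hw_csum_rx_good  = 0;
	}
}

int main(void)
{
	struct adapter a = { { { 0 } } };

	rx_good(&a.rx_ring[0]);
	rx_good(&a.rx_ring[1]);
	update_stats(&a);
	printf("good: %llu\n", a.hw_csum_rx_good); /* 2 */
	return 0;
}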
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9d8815302363..f267c003a1bc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -121,7 +121,6 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to
  * @msix_vector: the vector to map to the corresponding queue
- *
  */
 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 			     u8 queue, u8 msix_vector)
@@ -296,12 +295,11 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
 
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @adapter: address of board private structure
+ * @ring: pointer to Rx descriptor ring structure
  * @status_err: hardware indication of status of receive
  * @skb: skb currently being received and modified
  **/
-static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
-				       struct ixgbevf_ring *ring,
+static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb_checksum_none_assert(skb);
@@ -313,7 +311,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
 	/* if IP and error */
 	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
 	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
-		adapter->hw_csum_rx_error++;
+		ring->hw_csum_rx_error++;
 		return;
 	}
 
@@ -321,13 +319,13 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
 		return;
 
 	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
-		adapter->hw_csum_rx_error++;
+		ring->hw_csum_rx_error++;
 		return;
 	}
 
 	/* It must be a TCP or UDP packet with a valid checksum */
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	adapter->hw_csum_rx_good++;
+	ring->hw_csum_rx_good++;
 }
 
 /**
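The checksum logic above is easier to see as a decision table. The sketch below mirrors only the flag tests visible in these hunks (the driver also bails out earlier when Rx checksum offload is disabled); the bit values are made-up stand-ins, not the hardware's IXGBE_* definitions:

/* Decision table mirroring ixgbevf_rx_checksum(); bit values are
 * stand-ins, not the real IXGBE_* hardware definitions. */
#include <stdio.h>

#define STAT_IPCS 0x1	/* IP checksum was checked */
#define ERR_IPE   0x2	/* IP checksum error */
#define STAT_L4CS 0x4	/* L4 checksum was checked */
#define ERR_TCPE  0x8	/* TCP/UDP checksum error */

enum verdict { LEAVE_NONE, COUNT_ERROR, COUNT_GOOD };

static enum verdict classify(unsigned int staterr)
{
	if ((staterr & STAT_IPCS) && (staterr & ERR_IPE))
		return COUNT_ERROR;	/* ring->hw_csum_rx_error++ */
	if (!(staterr & STAT_L4CS))
		return LEAVE_NONE;	/* hardware did not check L4 */
	if (staterr & ERR_TCPE)
		return COUNT_ERROR;	/* ring->hw_csum_rx_error++ */
	return COUNT_GOOD;		/* CHECKSUM_UNNECESSARY, good++ */
}

int main(void)
{
	printf("%d\n", classify(STAT_IPCS | ERR_IPE));	 /* 1: bad IP csum */
	printf("%d\n", classify(STAT_IPCS));		 /* 0: L4 not checked */
	printf("%d\n", classify(STAT_IPCS | STAT_L4CS)); /* 2: good */
	return 0;
}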
@@ -341,15 +339,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbevf_rx_buffer *bi;
-	struct sk_buff *skb;
 	unsigned int i = rx_ring->next_to_use;
 
 	bi = &rx_ring->rx_buffer_info[i];
 
 	while (cleaned_count--) {
 		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
-		skb = bi->skb;
-		if (!skb) {
+
+		if (!bi->skb) {
+			struct sk_buff *skb;
+
 			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
 							rx_ring->rx_buf_len);
 			if (!skb) {
@@ -357,8 +356,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 				goto no_buffers;
 			}
 			bi->skb = skb;
-		}
-		if (!bi->dma) {
+
 			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
@@ -380,7 +378,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 no_buffers:
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
-
 		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
 	}
 }
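The streamlining in these hunks rests on one invariant: a recycled buffer (bi->skb already set) kept its DMA mapping from last time, so the old separate if (!bi->dma) test was dead weight, and allocation plus mapping collapse into a single branch. A toy model of the refill loop under that invariant, with malloc standing in for skb allocation and no real DMA mapping:

/* Toy model of the streamlined refill loop; names and helpers are
 * stand-ins, not driver API. malloc() plays netdev_alloc_skb_ip_align()
 * and the dma field just records that a "mapping" exists. */
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 4

struct rx_buffer {
	void *skb;		/* stand-in for struct sk_buff * */
	unsigned long dma;	/* stand-in for dma_addr_t */
};

static int refill(struct rx_buffer *buf, int count)
{
	int i, filled = 0;

	for (i = 0; i < count; i++) {
		if (!buf[i].skb) {
			/* allocate and "map" together: a recycled buffer
			 * already has both, so no separate !dma check */
			buf[i].skb = malloc(2048);
			if (!buf[i].skb)
				break;	/* caller retries later */
			buf[i].dma = (unsigned long)buf[i].skb;
		}
		filled++;
	}
	return filled;
}

int main(void)
{
	struct rx_buffer ring[RING_SIZE] = { { 0 } };
	int i;

	printf("filled %d of %d\n", refill(ring, RING_SIZE), RING_SIZE);
	for (i = 0; i < RING_SIZE; i++)
		free(ring[i].skb);
	return 0;
}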
@@ -464,7 +461,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 			goto next_desc;
 		}
 
-		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
+		ixgbevf_rx_checksum(rx_ring, staterr, skb);
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
@@ -765,7 +762,6 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-
 /**
  * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
  * @irq: unused
@@ -1150,9 +1146,6 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 	struct ixgbe_hw *hw = &adapter->hw;
 	int err;
 
-	if (!hw->mac.ops.set_vfta)
-		return -EOPNOTSUPP;
-
 	spin_lock_bh(&adapter->mbx_lock);
 
 	/* add VID to filter table */
@@ -1181,8 +1174,7 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	spin_lock_bh(&adapter->mbx_lock);
 
 	/* remove VID from filter table */
-	if (hw->mac.ops.set_vfta)
-		err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+	err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
 	spin_unlock_bh(&adapter->mbx_lock);
 
@@ -1228,12 +1220,13 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
 }
 
 /**
- * ixgbevf_set_rx_mode - Multicast set
+ * ixgbevf_set_rx_mode - Multicast and unicast set
  * @netdev: network interface device structure
  *
  * The set_rx_method entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast mode.
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
  **/
 static void ixgbevf_set_rx_mode(struct net_device *netdev)
 {
@@ -1243,8 +1236,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 	spin_lock_bh(&adapter->mbx_lock);
 
 	/* reprogram multicast list */
-	if (hw->mac.ops.update_mc_addr_list)
-		hw->mac.ops.update_mc_addr_list(hw, netdev);
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
 
 	ixgbevf_write_uc_addr_list(netdev);
 
@@ -1312,8 +1304,8 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
 			"not set within the polling period\n", rxr);
 	}
 
-	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-				(adapter->rx_ring[rxr].count - 1));
+	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
+				adapter->rx_ring[rxr].count - 1);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1414,12 +1406,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 
 	spin_lock_bh(&adapter->mbx_lock);
 
-	if (hw->mac.ops.set_rar) {
-		if (is_valid_ether_addr(hw->mac.addr))
-			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
-		else
-			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
-	}
+	if (is_valid_ether_addr(hw->mac.addr))
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	else
+		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 
 	spin_unlock_bh(&adapter->mbx_lock);
 
@@ -1595,7 +1585,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
 		return;
 
 	/* Free all the Tx ring sk_buffs */
-
 	for (i = 0; i < tx_ring->count; i++) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
@@ -1691,13 +1680,6 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
 	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
 		msleep(1);
 
-	/*
-	 * Check if PF is up before re-init. If not then skip until
-	 * later when the PF is up and ready to service requests from
-	 * the VF via mailbox. If the VF is up and running then the
-	 * watchdog task will continue to schedule reset tasks until
-	 * the PF is up and running.
-	 */
 	ixgbevf_down(adapter);
 	ixgbevf_up(adapter);
 
@@ -1709,15 +1691,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 
-	spin_lock_bh(&adapter->mbx_lock);
-
 	if (hw->mac.ops.reset_hw(hw))
 		hw_dbg(hw, "PF still resetting\n");
 	else
 		hw->mac.ops.init_hw(hw);
 
-	spin_unlock_bh(&adapter->mbx_lock);
-
 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
 		       netdev->addr_len);
@@ -1768,6 +1746,7 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
 		 */
 		adapter->num_msix_vectors = vectors;
 	}
+
 	return err;
 }
 
@@ -2064,7 +2043,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 			goto out;
 		}
 		memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
-			adapter->netdev->addr_len);
+		       adapter->netdev->addr_len);
 	}
 
 	/* lock to protect mailbox accesses */
@@ -2114,6 +2093,7 @@ out:
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
 
 	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
 				adapter->stats.vfgprc);
@@ -2127,6 +2107,15 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 				adapter->stats.vfgotc);
 	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
 				adapter->stats.vfmprc);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->hw_csum_rx_error +=
+			adapter->rx_ring[i].hw_csum_rx_error;
+		adapter->hw_csum_rx_good +=
+			adapter->rx_ring[i].hw_csum_rx_good;
+		adapter->rx_ring[i].hw_csum_rx_error = 0;
+		adapter->rx_ring[i].hw_csum_rx_good = 0;
+	}
 }
 
 /**
@@ -2201,6 +2190,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 link_speed = adapter->link_speed;
 	bool link_up = adapter->link_up;
+	s32 need_reset;
 
 	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
 
@@ -2208,29 +2198,19 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 	 * Always check the link on the watchdog because we have
 	 * no LSC interrupt
 	 */
-	if (hw->mac.ops.check_link) {
-		s32 need_reset;
-
-		spin_lock_bh(&adapter->mbx_lock);
+	spin_lock_bh(&adapter->mbx_lock);
 
-		need_reset = hw->mac.ops.check_link(hw, &link_speed,
-						    &link_up, false);
+	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 
-		spin_unlock_bh(&adapter->mbx_lock);
+	spin_unlock_bh(&adapter->mbx_lock);
 
-		if (need_reset) {
-			adapter->link_up = link_up;
-			adapter->link_speed = link_speed;
-			netif_carrier_off(netdev);
-			netif_tx_stop_all_queues(netdev);
-			schedule_work(&adapter->reset_task);
-			goto pf_has_reset;
-		}
-	} else {
-		/* always assume link is up, if no check link
-		 * function */
-		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-		link_up = true;
+	if (need_reset) {
+		adapter->link_up = link_up;
+		adapter->link_speed = link_speed;
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		schedule_work(&adapter->reset_task);
+		goto pf_has_reset;
 	}
 	adapter->link_up = link_up;
 	adapter->link_speed = link_speed;
@@ -2723,9 +2703,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    struct sk_buff *skb, u32 tx_flags)
 {
-
-
-
 	u32 vlan_macip_lens = 0;
 	u32 mss_l4len_idx = 0;
 	u32 type_tucmd = 0;
@@ -2915,7 +2892,6 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
 		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
 		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-
 	}
 
 	/*
@@ -3070,8 +3046,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 
 	spin_lock_bh(&adapter->mbx_lock);
 
-	if (hw->mac.ops.set_rar)
-		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
 
 	spin_unlock_bh(&adapter->mbx_lock);
 
@@ -3396,10 +3371,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_sw_init;
 
-	/* pick up the PCI bus settings for reporting later */
-	if (hw->mac.ops.get_bus_info)
-		hw->mac.ops.get_bus_info(hw);
-
 	strcpy(netdev->name, "eth%d");
 
 	err = register_netdev(netdev);