Diffstat (limited to 'drivers/net/ethernet/intel/i40evf')
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c       | 164
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h       |   2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h       |   1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h          |   1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c  |   6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c     | 110
6 files changed, 131 insertions, 153 deletions
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 458fbb421090..395f32f226c0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 			 tx_ring->vsi->seid,
 			 tx_ring->queue_index,
 			 tx_ring->next_to_use, i);
-		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-			 "  time_stamp           <%lx>\n"
-			 "  jiffies              <%lx>\n",
-			 tx_ring->tx_bi[i].time_stamp, jiffies);

 		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

@@ -488,6 +484,8 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
 	if (!dev)
 		return -ENOMEM;

+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(tx_ring->tx_bi);
 	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!tx_ring->tx_bi)
@@ -648,6 +646,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	struct device *dev = rx_ring->dev;
 	int bi_size;

+	/* warn if we are about to overwrite the pointer */
+	WARN_ON(rx_ring->rx_bi);
 	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
 	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
 	if (!rx_ring->rx_bi)
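Note: the two WARN_ON hunks above guard against the setup path running twice and silently leaking the previously allocated buffer-info array. A minimal sketch of the pattern, using a simplified stand-in ring struct rather than the driver's i40e_ring:

```c
#include <linux/kernel.h>
#include <linux/slab.h>

/* simplified stand-in for the driver's ring structure */
struct demo_ring {
	void *bufs;	/* per-descriptor tracking array */
	u16 count;	/* number of descriptors */
};

static int demo_setup(struct demo_ring *ring, size_t elem_size)
{
	/* warn if we are about to overwrite (and leak) the pointer */
	WARN_ON(ring->bufs);
	ring->bufs = kcalloc(ring->count, elem_size, GFP_KERNEL);
	return ring->bufs ? 0 : -ENOMEM;
}
```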
@@ -1128,9 +1128,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		/* ERR_MASK will only have valid bits if EOP set */
 		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
-			/* TODO: shouldn't we increment a counter indicating the
-			 * drop?
-			 */
 			continue;
 		}

@@ -1156,7 +1153,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
 		i40e_receive_skb(rx_ring, skb, vlan_tag);

-		rx_ring->netdev->last_rx = jiffies;
 		rx_desc->wb.qword1.status_error_len = 0;

 	} while (likely(total_rx_packets < budget));
@@ -1271,7 +1267,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 			 : 0;
 		i40e_receive_skb(rx_ring, skb, vlan_tag);

-		rx_ring->netdev->last_rx = jiffies;
 		rx_desc->wb.qword1.status_error_len = 0;
 	} while (likely(total_rx_packets < budget));

@@ -1352,7 +1347,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 }

 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb: send buffer
  * @tx_ring: ring to send buffer on
  * @flags: the tx flags to be set
@@ -1363,9 +1358,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
  * Returns error code indicate the frame should be dropped upon error and the
  * otherwise returns 0 to indicate the flags has been set properly.
  **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring,
-				      u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring,
+					       u32 *flags)
 {
 	__be16 protocol = skb->protocol;
 	u32 tx_flags = 0;
@@ -1408,16 +1403,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring: ptr to the ring to send
  * @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len: ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
-		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+		    u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+		    u32 *cd_tunneling)
 {
 	u32 cd_cmd, cd_tso_len, cd_mss;
 	struct ipv6hdr *ipv6h;
@@ -1468,12 +1461,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
 				u32 *td_cmd, u32 *td_offset,
 				struct i40e_ring *tx_ring,
 				u32 *cd_tunneling)
@@ -1489,6 +1482,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
 		default:
 			return;
@@ -1498,18 +1492,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		this_ipv6_hdr = inner_ipv6_hdr(skb);
 		this_tcp_hdrlen = inner_tcp_hdrlen(skb);

-		if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-			if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+			if (*tx_flags & I40E_TX_FLAGS_TSO) {
 				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
 				ip_hdr(skb)->check = 0;
 			} else {
 				*cd_tunneling |=
 					I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
 			}
-		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-			if (tx_flags & I40E_TX_FLAGS_TSO)
+			if (*tx_flags & I40E_TX_FLAGS_TSO)
 				ip_hdr(skb)->check = 0;
 		}

@@ -1521,8 +1514,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 					skb_transport_offset(skb)) >> 1) <<
 				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 		if (this_ip_hdr->version == 6) {
-			tx_flags &= ~I40E_TX_FLAGS_IPV4;
-			tx_flags |= I40E_TX_FLAGS_IPV6;
+			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
+			*tx_flags |= I40E_TX_FLAGS_IPV6;
 		}


@@ -1534,12 +1527,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 	}

 	/* Enable IP checksum offloads */
-	if (tx_flags & I40E_TX_FLAGS_IPV4) {
+	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
 		l4_hdr = this_ip_hdr->protocol;
 		/* the stack computes the IP header already, the only time we
 		 * need the hardware to recompute it is in the case of TSO.
 		 */
-		if (tx_flags & I40E_TX_FLAGS_TSO) {
+		if (*tx_flags & I40E_TX_FLAGS_TSO) {
 			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
 			this_ip_hdr->check = 0;
 		} else {
@@ -1548,7 +1541,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
 		/* Now set the td_offset for IP header length */
 		*td_offset = (network_hdr_len >> 2) <<
 			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
 		l4_hdr = this_ipv6_hdr->nexthdr;
 		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
 		/* Now set the td_offset for IP header length */
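Note: the switch from `u32 tx_flags` to `u32 *tx_flags` in i40e_tx_enable_csum is what lets the function publish two decisions back to the caller: that the packet is VXLAN-tunneled (the new I40E_TX_FLAGS_VXLAN_TUNNEL bit) and that the inner header is IPv6 even when the outer one was IPv4. A minimal sketch of the idea with illustrative flag values, not the driver's definitions:

```c
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_FLAG_IPV4   BIT(0)
#define DEMO_FLAG_IPV6   BIT(1)
#define DEMO_FLAG_TUNNEL BIT(2)

/* callee updates the flags word in place so later transmit stages
 * (context descriptor setup, statistics) see the corrected state
 */
static void demo_classify_tunnel(u32 *tx_flags, bool inner_is_v6)
{
	*tx_flags |= DEMO_FLAG_TUNNEL;
	if (inner_is_v6) {
		*tx_flags &= ~DEMO_FLAG_IPV4;	/* outer header said v4 ... */
		*tx_flags |= DEMO_FLAG_IPV6;	/* ... but the inner one is v6 */
	}
}
```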
@@ -1672,7 +1665,44 @@ linearize_chk_done:
 }

 /**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
  * @skb: send buffer
  * @first: first buffer info buffer to use
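Note: the relocated stop-check pair is a common lock-free producer pattern: the inline first-level check keeps the hot path to a single comparison, and only the unlikely slow path stops the queue, issues smp_mb() so the stop is ordered against re-reading the ring indices, and re-checks in case the cleanup side freed descriptors in that window. A condensed restatement with a hypothetical ring type; ring_unused() stands in for I40E_DESC_UNUSED():

```c
struct demo_tx_ring {
	struct net_device *netdev;
	int queue_index;
	u16 next_to_use;	/* producer index */
	u16 next_to_clean;	/* consumer index */
	u16 count;
};

static int ring_unused(struct demo_tx_ring *r)
{
	int free = r->next_to_clean - r->next_to_use - 1;

	return free < 0 ? free + r->count : free;	/* one slot kept empty */
}

static int demo_stop_slow(struct demo_tx_ring *r, int need)
{
	netif_stop_subqueue(r->netdev, r->queue_index);
	smp_mb();	/* order the stop against the re-read below */
	if (ring_unused(r) < need)
		return -EBUSY;
	/* cleanup freed space between the two checks: undo the stop */
	netif_start_subqueue(r->netdev, r->queue_index);
	return 0;
}

static inline int demo_maybe_stop(struct demo_tx_ring *r, int need)
{
	return likely(ring_unused(r) >= need) ? 0 : demo_stop_slow(r, need);
}
```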
@@ -1681,9 +1711,9 @@ linearize_chk_done:
  * @td_cmd: the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-			struct i40e_tx_buffer *first, u32 tx_flags,
-			const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+				 struct i40e_tx_buffer *first, u32 tx_flags,
+				 const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
 	unsigned int data_len = skb->data_len;
 	unsigned int size = skb_headlen(skb);
@@ -1789,9 +1819,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 			     tx_ring->queue_index),
 			     first->bytecount);

-	/* set the timestamp */
-	first->time_stamp = jiffies;
-
 	/* Force memory writes to complete before letting h/w
 	 * know there are new descriptors to fetch.  (Only
 	 * applicable for weak-ordered memory model archs,
@@ -1808,8 +1835,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,

 	tx_ring->next_to_use = i;

+	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (!skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						   tx_ring->queue_index)))
+		writel(i, tx_ring->tail);

 	return;

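Note: the writel() here is the MMIO doorbell telling hardware that the tail moved. The new condition defers it while the stack signals that more frames are queued behind this one (skb->xmit_more in kernels of this era; later kernels moved the hint to netdev_xmit_more()), so a burst of packets costs one register write instead of one per packet. The netif_xmit_stopped() escape hatch makes sure the last accepted packet is not stranded when the queue stops mid-burst. The idea in isolation, with a simplified stand-in ring:

```c
struct demo_tx_ring {
	struct net_device *netdev;
	int queue_index;
	u8 __iomem *tail;	/* MMIO doorbell register */
};

/* ring the doorbell only for the last skb of a burst */
static void demo_kick_hw(struct demo_tx_ring *r, struct sk_buff *skb,
			 u32 next_to_use)
{
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(r->netdev, r->queue_index)))
		writel(next_to_use, r->tail);
	/* otherwise a following skb in the same burst performs the write */
}
```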
@@ -1831,44 +1862,7 @@ dma_error:
 }

 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb: send buffer
  * @tx_ring: ring to send buffer on
  *
@@ -1876,8 +1870,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * there is not enough descriptors available in this ring since we need at least
  * one descriptor.
  **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-				      struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+					       struct i40e_ring *tx_ring)
 {
 	unsigned int f;
 	int count = 0;
@@ -1892,7 +1886,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

 	count += TXD_USE_COUNT(skb_headlen(skb));
-	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+	if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
 		return 0;
 	}
@@ -1918,11 +1912,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	u32 td_cmd = 0;
 	u8 hdr_len = 0;
 	int tso;
-	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+	if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
 		return NETDEV_TX_BUSY;

 	/* prepare the xmit flags */
-	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+	if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
 		goto out_drop;

 	/* obtain protocol of skb */
@@ -1937,7 +1931,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	else if (protocol == htons(ETH_P_IPV6))
 		tx_flags |= I40E_TX_FLAGS_IPV6;

-	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+	tso = i40e_tso(tx_ring, skb, &hdr_len,
 		       &cd_type_cmd_tso_mss, &cd_tunneling);

 	if (tso < 0)
@@ -1958,17 +1952,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		tx_flags |= I40E_TX_FLAGS_CSUM;

-		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+		i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
 				    tx_ring, &cd_tunneling);
 	}

 	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
 			   cd_tunneling, cd_l2tag2);

-	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-		    td_cmd, td_offset);
-
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+	i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+		      td_cmd, td_offset);

 	return NETDEV_TX_OK;

diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1fbac1..e7a34f899f2c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {

 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
-	unsigned long time_stamp;
 	union {
 		struct sk_buff *skb;
 		void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a93379..c463ec41579c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
 	/* flow director stats */
 	u64 fd_atr_match;
 	u64 fd_sb_match;
+	u64 fd_atr_tunnel_match;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 1b98c25b3092..fea3b75a9a35 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -264,7 +264,6 @@ extern const char i40evf_driver_version[];

 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
-void i40evf_reinit_locked(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index f4e77665bc54..2b53c870e7f1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -267,8 +267,10 @@ static int i40evf_set_ringparam(struct net_device *netdev,
 	adapter->tx_desc_count = new_tx_count;
 	adapter->rx_desc_count = new_rx_count;

-	if (netif_running(netdev))
-		i40evf_reinit_locked(adapter);
+	if (netif_running(netdev)) {
+		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+		schedule_work(&adapter->reset_task);
+	}

 	return 0;
 }
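Note: instead of reinitializing inline via i40evf_reinit_locked() (removed later in this patch), ring-size changes now funnel through the adapter's single reset work item, so every reinit path shares one serialized teardown/bring-up sequence. The general shape of the pattern, with illustrative names:

```c
#include <linux/bits.h>
#include <linux/workqueue.h>

#define DEMO_FLAG_RESET_NEEDED BIT(0)

struct demo_adapter {
	u32 flags;
	struct work_struct reset_task;	/* owns the full reinit sequence */
};

/* any configuration path that needs a reinit does only this much */
static void demo_request_reinit(struct demo_adapter *adapter)
{
	adapter->flags |= DEMO_FLAG_RESET_NEEDED;	/* consumed by the task */
	schedule_work(&adapter->reset_task);		/* runs in process context */
}
```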
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 7c53aca4b5a6..4ab4ebba07a1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -170,7 +170,8 @@ static void i40evf_tx_timeout(struct net_device *netdev)
 	struct i40evf_adapter *adapter = netdev_priv(netdev);

 	adapter->tx_timeout_count++;
-	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+	if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING |
+				I40EVF_FLAG_RESET_NEEDED))) {
 		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
 		schedule_work(&adapter->reset_task);
 	}
@@ -1460,7 +1461,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
 	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
 		lut = 0;
 		for (j = 0; j < 4; j++) {
-			if (cqueue == adapter->vsi_res->num_queue_pairs)
+			if (cqueue == adapter->num_active_queues)
 				cqueue = 0;
 			lut |= ((cqueue) << (8 * j));
 			cqueue++;
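Note: each VFQF_HLUT register packs four 8-bit queue indices, one per byte, and cqueue wraps at the queue count. The fix matters because adapter->num_active_queues can be smaller than the VSI's allotted num_queue_pairs, and presumably hashing flows onto a queue that is not actually running would strand that traffic. A standalone restatement of the packing: with three active queues the first LUT word comes out to 0x00020100 (queues 0, 1, 2, 0 from least- to most-significant byte).

```c
#include <linux/types.h>

/* standalone sketch of the RSS lookup-table word packing */
static u32 demo_pack_lut_word(unsigned int *cqueue, unsigned int num_active)
{
	u32 lut = 0;
	int j;

	for (j = 0; j < 4; j++) {
		if (*cqueue == num_active)
			*cqueue = 0;		/* wrap at the active count */
		lut |= *cqueue << (8 * j);	/* one queue index per byte */
		(*cqueue)++;
	}
	return lut;	/* e.g. num_active = 3, *cqueue = 0 -> 0x00020100 */
}
```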
@@ -1470,8 +1471,8 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
 	i40e_flush(hw);
 }

-#define I40EVF_RESET_WAIT_MS 100
-#define I40EVF_RESET_WAIT_COUNT 200
+#define I40EVF_RESET_WAIT_MS 10
+#define I40EVF_RESET_WAIT_COUNT 500
 /**
  * i40evf_reset_task - Call-back task to handle hardware reset
  * @work: pointer to work_struct
@@ -1495,10 +1496,17 @@ static void i40evf_reset_task(struct work_struct *work)
 				&adapter->crit_section))
 		usleep_range(500, 1000);

+	i40evf_misc_irq_disable(adapter);
 	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
-		dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
+		/* Restart the AQ here. If we have been reset but didn't
+		 * detect it, or if the PF had to reinit, our AQ will be hosed.
+		 */
+		i40evf_shutdown_adminq(hw);
+		i40evf_init_adminq(hw);
 		i40evf_request_reset(adapter);
 	}
+	adapter->flags |= I40EVF_FLAG_RESET_PENDING;

 	/* poll until we see the reset actually happen */
 	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
@@ -1507,10 +1515,10 @@ static void i40evf_reset_task(struct work_struct *work)
 		if ((rstat_val != I40E_VFR_VFACTIVE) &&
 		    (rstat_val != I40E_VFR_COMPLETED))
 			break;
-		msleep(I40EVF_RESET_WAIT_MS);
+		usleep_range(500, 1000);
 	}
 	if (i == I40EVF_RESET_WAIT_COUNT) {
-		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+		dev_info(&adapter->pdev->dev, "Never saw reset\n");
 		goto continue_reset; /* act like the reset happened */
 	}

@@ -1518,11 +1526,12 @@ static void i40evf_reset_task(struct work_struct *work)
 	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
 		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
 			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if ((rstat_val == I40E_VFR_VFACTIVE) ||
-		    (rstat_val == I40E_VFR_COMPLETED))
+		if (rstat_val == I40E_VFR_VFACTIVE)
 			break;
 		msleep(I40EVF_RESET_WAIT_MS);
 	}
+	/* extra wait to make sure minimum wait is met */
+	msleep(I40EVF_RESET_WAIT_MS);
 	if (i == I40EVF_RESET_WAIT_COUNT) {
 		struct i40evf_mac_filter *f, *ftmp;
 		struct i40evf_vlan_filter *fv, *fvtmp;
@@ -1534,11 +1543,10 @@ static void i40evf_reset_task(struct work_struct *work)

 		if (netif_running(adapter->netdev)) {
 			set_bit(__I40E_DOWN, &adapter->vsi.state);
-			i40evf_irq_disable(adapter);
-			i40evf_napi_disable_all(adapter);
-			netif_tx_disable(netdev);
-			netif_tx_stop_all_queues(netdev);
 			netif_carrier_off(netdev);
+			netif_tx_disable(netdev);
+			i40evf_napi_disable_all(adapter);
+			i40evf_irq_disable(adapter);
 			i40evf_free_traffic_irqs(adapter);
 			i40evf_free_all_tx_resources(adapter);
 			i40evf_free_all_rx_resources(adapter);
@@ -1550,6 +1558,7 @@ static void i40evf_reset_task(struct work_struct *work)
 			list_del(&f->list);
 			kfree(f);
 		}
+
 		list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
 					 list) {
 			list_del(&fv->list);
@@ -1564,22 +1573,27 @@ static void i40evf_reset_task(struct work_struct *work)
 		i40evf_shutdown_adminq(hw);
 		adapter->netdev->flags &= ~IFF_UP;
 		clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+		dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
 		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}

 continue_reset:
-	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
-
-	i40evf_irq_disable(adapter);
-
 	if (netif_running(adapter->netdev)) {
-		i40evf_napi_disable_all(adapter);
-		netif_tx_disable(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		i40evf_napi_disable_all(adapter);
 	}
+	i40evf_irq_disable(adapter);

 	adapter->state = __I40EVF_RESETTING;
+	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
+	/* free the Tx/Rx rings and descriptors, might be better to just
+	 * re-use them sometime in the future
+	 */
+	i40evf_free_all_rx_resources(adapter);
+	i40evf_free_all_tx_resources(adapter);

 	/* kill and reinit the admin queue */
 	if (i40evf_shutdown_adminq(hw))
@@ -1603,6 +1617,7 @@ continue_reset:
 	adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+	i40evf_misc_irq_enable(adapter);

 	mod_timer(&adapter->watchdog_timer, jiffies + 2);

@@ -1624,7 +1639,10 @@ continue_reset:
 			goto reset_err;

 		i40evf_irq_enable(adapter, true);
+	} else {
+		adapter->state = __I40EVF_DOWN;
 	}
+
 	return;
 reset_err:
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
@@ -1667,6 +1685,11 @@ static void i40evf_adminq_task(struct work_struct *work)
 		memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
 	} while (pending);

+	if ((adapter->flags &
+	     (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
+	    adapter->state == __I40EVF_RESETTING)
+		goto freedom;
+
 	/* check for error indications */
 	val = rd32(hw, hw->aq.arq.len);
 	oldval = val;
@@ -1702,6 +1725,7 @@ static void i40evf_adminq_task(struct work_struct *work)
 	if (oldval != val)
 		wr32(hw, hw->aq.asq.len, val);

+freedom:
 	kfree(event.msg_buf);
 out:
 	/* re-enable Admin queue interrupt cause */
@@ -1897,47 +1921,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
 }

 /**
- * i40evf_reinit_locked - Software reinit
- * @adapter: board private structure
- *
- * Reinititalizes the ring structures in response to a software configuration
- * change. Roughly the same as close followed by open, but skips releasing
- * and reallocating the interrupts.
- **/
-void i40evf_reinit_locked(struct i40evf_adapter *adapter)
-{
-	struct net_device *netdev = adapter->netdev;
-	int err;
-
-	WARN_ON(in_interrupt());
-
-	i40evf_down(adapter);
-
-	/* allocate transmit descriptors */
-	err = i40evf_setup_all_tx_resources(adapter);
-	if (err)
-		goto err_reinit;
-
-	/* allocate receive descriptors */
-	err = i40evf_setup_all_rx_resources(adapter);
-	if (err)
-		goto err_reinit;
-
-	i40evf_configure(adapter);
-
-	err = i40evf_up_complete(adapter);
-	if (err)
-		goto err_reinit;
-
-	i40evf_irq_enable(adapter, true);
-	return;
-
-err_reinit:
-	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
-	i40evf_close(netdev);
-}
-
-/**
  * i40evf_change_mtu - Change the Maximum Transfer Unit
  * @netdev: network interface device structure
  * @new_mtu: new value for maximum frame size
@@ -1952,9 +1935,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
 	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
 		return -EINVAL;

-	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
-	i40evf_reinit_locked(adapter);
+	adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+	schedule_work(&adapter->reset_task);
+
 	return 0;
 }
