diff options
author | David S. Miller <davem@davemloft.net> | 2018-04-08 12:39:47 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-04-08 12:39:47 -0400 |
commit | 4e31a6845f673a70a63688a82051ce1451c742bc (patch) | |
tree | bf407ed2e5cb059ec44d637240969abd095ba4e1 | |
parent | e41f0548473eb7b6499bd8482474e30ae6d31220 (diff) | |
parent | 30f796258c49baa313222456bcf5b0246da55ff1 (diff) |
Merge branch 'ibmvnic-Fix-driver-reset-and-DMA-bugs'
Thomas Falcon says:
====================
ibmvnic: Fix driver reset and DMA bugs
This patch series introduces some fixes to the driver reset
routines and a patch that fixes mistakes caught by the kernel
DMA debugger.
The reset fixes include properly resetting TX queue counters
after a reset, as well as updates to driver reset error-handling code.
It also provides updates to the reset handling routine for redundant
backing VF failover and partition migration cases.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/ethernet/ibm/ibmvnic.c | 146 | ||||
-rw-r--r-- | drivers/net/ethernet/ibm/ibmvnic.h | 1 |
2 files changed, 98 insertions, 49 deletions
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b492af6affc3..aad5658d79d5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -118,6 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); | |||
118 | static int ibmvnic_init(struct ibmvnic_adapter *); | 118 | static int ibmvnic_init(struct ibmvnic_adapter *); |
119 | static void release_crq_queue(struct ibmvnic_adapter *); | 119 | static void release_crq_queue(struct ibmvnic_adapter *); |
120 | static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); | 120 | static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); |
121 | static int init_crq_queue(struct ibmvnic_adapter *adapter); | ||
121 | 122 | ||
122 | struct ibmvnic_stat { | 123 | struct ibmvnic_stat { |
123 | char name[ETH_GSTRING_LEN]; | 124 | char name[ETH_GSTRING_LEN]; |
@@ -320,18 +321,16 @@ failure: | |||
320 | dev_info(dev, "replenish pools failure\n"); | 321 | dev_info(dev, "replenish pools failure\n"); |
321 | pool->free_map[pool->next_free] = index; | 322 | pool->free_map[pool->next_free] = index; |
322 | pool->rx_buff[index].skb = NULL; | 323 | pool->rx_buff[index].skb = NULL; |
323 | if (!dma_mapping_error(dev, dma_addr)) | ||
324 | dma_unmap_single(dev, dma_addr, pool->buff_size, | ||
325 | DMA_FROM_DEVICE); | ||
326 | 324 | ||
327 | dev_kfree_skb_any(skb); | 325 | dev_kfree_skb_any(skb); |
328 | adapter->replenish_add_buff_failure++; | 326 | adapter->replenish_add_buff_failure++; |
329 | atomic_add(buffers_added, &pool->available); | 327 | atomic_add(buffers_added, &pool->available); |
330 | 328 | ||
331 | if (lpar_rc == H_CLOSED) { | 329 | if (lpar_rc == H_CLOSED || adapter->failover_pending) { |
332 | /* Disable buffer pool replenishment and report carrier off if | 330 | /* Disable buffer pool replenishment and report carrier off if |
333 | * queue is closed. Firmware guarantees that a signal will | 331 | * queue is closed or pending failover. |
334 | * be sent to the driver, triggering a reset. | 332 | * Firmware guarantees that a signal will be sent to the |
333 | * driver, triggering a reset. | ||
335 | */ | 334 | */ |
336 | deactivate_rx_pools(adapter); | 335 | deactivate_rx_pools(adapter); |
337 | netif_carrier_off(adapter->netdev); | 336 | netif_carrier_off(adapter->netdev); |
@@ -1071,6 +1070,14 @@ static int ibmvnic_open(struct net_device *netdev) | |||
1071 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 1070 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
1072 | int rc; | 1071 | int rc; |
1073 | 1072 | ||
1073 | /* If device failover is pending, just set device state and return. | ||
1074 | * Device operation will be handled by reset routine. | ||
1075 | */ | ||
1076 | if (adapter->failover_pending) { | ||
1077 | adapter->state = VNIC_OPEN; | ||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
1074 | mutex_lock(&adapter->reset_lock); | 1081 | mutex_lock(&adapter->reset_lock); |
1075 | 1082 | ||
1076 | if (adapter->state != VNIC_CLOSED) { | 1083 | if (adapter->state != VNIC_CLOSED) { |
@@ -1218,7 +1225,6 @@ static int __ibmvnic_close(struct net_device *netdev) | |||
1218 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); | 1225 | rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); |
1219 | if (rc) | 1226 | if (rc) |
1220 | return rc; | 1227 | return rc; |
1221 | ibmvnic_cleanup(netdev); | ||
1222 | adapter->state = VNIC_CLOSED; | 1228 | adapter->state = VNIC_CLOSED; |
1223 | return 0; | 1229 | return 0; |
1224 | } | 1230 | } |
@@ -1228,8 +1234,17 @@ static int ibmvnic_close(struct net_device *netdev) | |||
1228 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 1234 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
1229 | int rc; | 1235 | int rc; |
1230 | 1236 | ||
1237 | /* If device failover is pending, just set device state and return. | ||
1238 | * Device operation will be handled by reset routine. | ||
1239 | */ | ||
1240 | if (adapter->failover_pending) { | ||
1241 | adapter->state = VNIC_CLOSED; | ||
1242 | return 0; | ||
1243 | } | ||
1244 | |||
1231 | mutex_lock(&adapter->reset_lock); | 1245 | mutex_lock(&adapter->reset_lock); |
1232 | rc = __ibmvnic_close(netdev); | 1246 | rc = __ibmvnic_close(netdev); |
1247 | ibmvnic_cleanup(netdev); | ||
1233 | mutex_unlock(&adapter->reset_lock); | 1248 | mutex_unlock(&adapter->reset_lock); |
1234 | 1249 | ||
1235 | return rc; | 1250 | return rc; |
@@ -1562,8 +1577,9 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
1562 | dev_kfree_skb_any(skb); | 1577 | dev_kfree_skb_any(skb); |
1563 | tx_buff->skb = NULL; | 1578 | tx_buff->skb = NULL; |
1564 | 1579 | ||
1565 | if (lpar_rc == H_CLOSED) { | 1580 | if (lpar_rc == H_CLOSED || adapter->failover_pending) { |
1566 | /* Disable TX and report carrier off if queue is closed. | 1581 | /* Disable TX and report carrier off if queue is closed |
1582 | * or pending failover. | ||
1567 | * Firmware guarantees that a signal will be sent to the | 1583 | * Firmware guarantees that a signal will be sent to the |
1568 | * driver, triggering a reset or some other action. | 1584 | * driver, triggering a reset or some other action. |
1569 | */ | 1585 | */ |
@@ -1711,14 +1727,10 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1711 | old_num_rx_queues = adapter->req_rx_queues; | 1727 | old_num_rx_queues = adapter->req_rx_queues; |
1712 | old_num_tx_queues = adapter->req_tx_queues; | 1728 | old_num_tx_queues = adapter->req_tx_queues; |
1713 | 1729 | ||
1714 | if (rwi->reset_reason == VNIC_RESET_MOBILITY) { | 1730 | ibmvnic_cleanup(netdev); |
1715 | rc = ibmvnic_reenable_crq_queue(adapter); | 1731 | |
1716 | if (rc) | 1732 | if (adapter->reset_reason != VNIC_RESET_MOBILITY && |
1717 | return 0; | 1733 | adapter->reset_reason != VNIC_RESET_FAILOVER) { |
1718 | ibmvnic_cleanup(netdev); | ||
1719 | } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) { | ||
1720 | ibmvnic_cleanup(netdev); | ||
1721 | } else { | ||
1722 | rc = __ibmvnic_close(netdev); | 1734 | rc = __ibmvnic_close(netdev); |
1723 | if (rc) | 1735 | if (rc) |
1724 | return rc; | 1736 | return rc; |
@@ -1737,6 +1749,23 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
1737 | */ | 1749 | */ |
1738 | adapter->state = VNIC_PROBED; | 1750 | adapter->state = VNIC_PROBED; |
1739 | 1751 | ||
1752 | if (adapter->wait_for_reset) { | ||
1753 | rc = init_crq_queue(adapter); | ||
1754 | } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) { | ||
1755 | rc = ibmvnic_reenable_crq_queue(adapter); | ||
1756 | release_sub_crqs(adapter, 1); | ||
1757 | } else { | ||
1758 | rc = ibmvnic_reset_crq(adapter); | ||
1759 | if (!rc) | ||
1760 | rc = vio_enable_interrupts(adapter->vdev); | ||
1761 | } | ||
1762 | |||
1763 | if (rc) { | ||
1764 | netdev_err(adapter->netdev, | ||
1765 | "Couldn't initialize crq. rc=%d\n", rc); | ||
1766 | return rc; | ||
1767 | } | ||
1768 | |||
1740 | rc = ibmvnic_init(adapter); | 1769 | rc = ibmvnic_init(adapter); |
1741 | if (rc) | 1770 | if (rc) |
1742 | return IBMVNIC_INIT_FAILED; | 1771 | return IBMVNIC_INIT_FAILED; |
@@ -1878,23 +1907,26 @@ static void __ibmvnic_reset(struct work_struct *work) | |||
1878 | mutex_unlock(&adapter->reset_lock); | 1907 | mutex_unlock(&adapter->reset_lock); |
1879 | } | 1908 | } |
1880 | 1909 | ||
1881 | static void ibmvnic_reset(struct ibmvnic_adapter *adapter, | 1910 | static int ibmvnic_reset(struct ibmvnic_adapter *adapter, |
1882 | enum ibmvnic_reset_reason reason) | 1911 | enum ibmvnic_reset_reason reason) |
1883 | { | 1912 | { |
1884 | struct ibmvnic_rwi *rwi, *tmp; | 1913 | struct ibmvnic_rwi *rwi, *tmp; |
1885 | struct net_device *netdev = adapter->netdev; | 1914 | struct net_device *netdev = adapter->netdev; |
1886 | struct list_head *entry; | 1915 | struct list_head *entry; |
1916 | int ret; | ||
1887 | 1917 | ||
1888 | if (adapter->state == VNIC_REMOVING || | 1918 | if (adapter->state == VNIC_REMOVING || |
1889 | adapter->state == VNIC_REMOVED) { | 1919 | adapter->state == VNIC_REMOVED || |
1890 | netdev_dbg(netdev, "Adapter removing, skipping reset\n"); | 1920 | adapter->failover_pending) { |
1891 | return; | 1921 | ret = EBUSY; |
1922 | netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); | ||
1923 | goto err; | ||
1892 | } | 1924 | } |
1893 | 1925 | ||
1894 | if (adapter->state == VNIC_PROBING) { | 1926 | if (adapter->state == VNIC_PROBING) { |
1895 | netdev_warn(netdev, "Adapter reset during probe\n"); | 1927 | netdev_warn(netdev, "Adapter reset during probe\n"); |
1896 | adapter->init_done_rc = EAGAIN; | 1928 | ret = adapter->init_done_rc = EAGAIN; |
1897 | return; | 1929 | goto err; |
1898 | } | 1930 | } |
1899 | 1931 | ||
1900 | mutex_lock(&adapter->rwi_lock); | 1932 | mutex_lock(&adapter->rwi_lock); |
@@ -1904,7 +1936,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, | |||
1904 | if (tmp->reset_reason == reason) { | 1936 | if (tmp->reset_reason == reason) { |
1905 | netdev_dbg(netdev, "Skipping matching reset\n"); | 1937 | netdev_dbg(netdev, "Skipping matching reset\n"); |
1906 | mutex_unlock(&adapter->rwi_lock); | 1938 | mutex_unlock(&adapter->rwi_lock); |
1907 | return; | 1939 | ret = EBUSY; |
1940 | goto err; | ||
1908 | } | 1941 | } |
1909 | } | 1942 | } |
1910 | 1943 | ||
@@ -1912,7 +1945,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, | |||
1912 | if (!rwi) { | 1945 | if (!rwi) { |
1913 | mutex_unlock(&adapter->rwi_lock); | 1946 | mutex_unlock(&adapter->rwi_lock); |
1914 | ibmvnic_close(netdev); | 1947 | ibmvnic_close(netdev); |
1915 | return; | 1948 | ret = ENOMEM; |
1949 | goto err; | ||
1916 | } | 1950 | } |
1917 | 1951 | ||
1918 | rwi->reset_reason = reason; | 1952 | rwi->reset_reason = reason; |
@@ -1921,6 +1955,12 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, | |||
1921 | 1955 | ||
1922 | netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); | 1956 | netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); |
1923 | schedule_work(&adapter->ibmvnic_reset); | 1957 | schedule_work(&adapter->ibmvnic_reset); |
1958 | |||
1959 | return 0; | ||
1960 | err: | ||
1961 | if (adapter->wait_for_reset) | ||
1962 | adapter->wait_for_reset = false; | ||
1963 | return -ret; | ||
1924 | } | 1964 | } |
1925 | 1965 | ||
1926 | static void ibmvnic_tx_timeout(struct net_device *dev) | 1966 | static void ibmvnic_tx_timeout(struct net_device *dev) |
@@ -2055,6 +2095,8 @@ static void ibmvnic_netpoll_controller(struct net_device *dev) | |||
2055 | 2095 | ||
2056 | static int wait_for_reset(struct ibmvnic_adapter *adapter) | 2096 | static int wait_for_reset(struct ibmvnic_adapter *adapter) |
2057 | { | 2097 | { |
2098 | int rc, ret; | ||
2099 | |||
2058 | adapter->fallback.mtu = adapter->req_mtu; | 2100 | adapter->fallback.mtu = adapter->req_mtu; |
2059 | adapter->fallback.rx_queues = adapter->req_rx_queues; | 2101 | adapter->fallback.rx_queues = adapter->req_rx_queues; |
2060 | adapter->fallback.tx_queues = adapter->req_tx_queues; | 2102 | adapter->fallback.tx_queues = adapter->req_tx_queues; |
@@ -2062,11 +2104,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter) | |||
2062 | adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; | 2104 | adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; |
2063 | 2105 | ||
2064 | init_completion(&adapter->reset_done); | 2106 | init_completion(&adapter->reset_done); |
2065 | ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); | ||
2066 | adapter->wait_for_reset = true; | 2107 | adapter->wait_for_reset = true; |
2108 | rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); | ||
2109 | if (rc) | ||
2110 | return rc; | ||
2067 | wait_for_completion(&adapter->reset_done); | 2111 | wait_for_completion(&adapter->reset_done); |
2068 | 2112 | ||
2113 | ret = 0; | ||
2069 | if (adapter->reset_done_rc) { | 2114 | if (adapter->reset_done_rc) { |
2115 | ret = -EIO; | ||
2070 | adapter->desired.mtu = adapter->fallback.mtu; | 2116 | adapter->desired.mtu = adapter->fallback.mtu; |
2071 | adapter->desired.rx_queues = adapter->fallback.rx_queues; | 2117 | adapter->desired.rx_queues = adapter->fallback.rx_queues; |
2072 | adapter->desired.tx_queues = adapter->fallback.tx_queues; | 2118 | adapter->desired.tx_queues = adapter->fallback.tx_queues; |
@@ -2074,12 +2120,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter) | |||
2074 | adapter->desired.tx_entries = adapter->fallback.tx_entries; | 2120 | adapter->desired.tx_entries = adapter->fallback.tx_entries; |
2075 | 2121 | ||
2076 | init_completion(&adapter->reset_done); | 2122 | init_completion(&adapter->reset_done); |
2077 | ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); | 2123 | adapter->wait_for_reset = true; |
2124 | rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); | ||
2125 | if (rc) | ||
2126 | return ret; | ||
2078 | wait_for_completion(&adapter->reset_done); | 2127 | wait_for_completion(&adapter->reset_done); |
2079 | } | 2128 | } |
2080 | adapter->wait_for_reset = false; | 2129 | adapter->wait_for_reset = false; |
2081 | 2130 | ||
2082 | return adapter->reset_done_rc; | 2131 | return ret; |
2083 | } | 2132 | } |
2084 | 2133 | ||
2085 | static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) | 2134 | static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) |
@@ -2364,6 +2413,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, | |||
2364 | } | 2413 | } |
2365 | 2414 | ||
2366 | memset(scrq->msgs, 0, 4 * PAGE_SIZE); | 2415 | memset(scrq->msgs, 0, 4 * PAGE_SIZE); |
2416 | atomic_set(&scrq->used, 0); | ||
2367 | scrq->cur = 0; | 2417 | scrq->cur = 0; |
2368 | 2418 | ||
2369 | rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, | 2419 | rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, |
@@ -2574,7 +2624,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, | |||
2574 | union sub_crq *next; | 2624 | union sub_crq *next; |
2575 | int index; | 2625 | int index; |
2576 | int i, j; | 2626 | int i, j; |
2577 | u8 first; | 2627 | u8 *first; |
2578 | 2628 | ||
2579 | restart_loop: | 2629 | restart_loop: |
2580 | while (pending_scrq(adapter, scrq)) { | 2630 | while (pending_scrq(adapter, scrq)) { |
@@ -2605,11 +2655,12 @@ restart_loop: | |||
2605 | txbuff->data_dma[j] = 0; | 2655 | txbuff->data_dma[j] = 0; |
2606 | } | 2656 | } |
2607 | /* if sub_crq was sent indirectly */ | 2657 | /* if sub_crq was sent indirectly */ |
2608 | first = txbuff->indir_arr[0].generic.first; | 2658 | first = &txbuff->indir_arr[0].generic.first; |
2609 | if (first == IBMVNIC_CRQ_CMD) { | 2659 | if (*first == IBMVNIC_CRQ_CMD) { |
2610 | dma_unmap_single(dev, txbuff->indir_dma, | 2660 | dma_unmap_single(dev, txbuff->indir_dma, |
2611 | sizeof(txbuff->indir_arr), | 2661 | sizeof(txbuff->indir_arr), |
2612 | DMA_TO_DEVICE); | 2662 | DMA_TO_DEVICE); |
2663 | *first = 0; | ||
2613 | } | 2664 | } |
2614 | 2665 | ||
2615 | if (txbuff->last_frag) { | 2666 | if (txbuff->last_frag) { |
@@ -3882,9 +3933,9 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, | |||
3882 | int i; | 3933 | int i; |
3883 | 3934 | ||
3884 | dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, | 3935 | dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, |
3885 | DMA_BIDIRECTIONAL); | 3936 | DMA_TO_DEVICE); |
3886 | dma_unmap_single(dev, adapter->login_rsp_buf_token, | 3937 | dma_unmap_single(dev, adapter->login_rsp_buf_token, |
3887 | adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); | 3938 | adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); |
3888 | 3939 | ||
3889 | /* If the number of queues requested can't be allocated by the | 3940 | /* If the number of queues requested can't be allocated by the |
3890 | * server, the login response will return with code 1. We will need | 3941 | * server, the login response will return with code 1. We will need |
@@ -4144,7 +4195,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, | |||
4144 | case IBMVNIC_CRQ_INIT: | 4195 | case IBMVNIC_CRQ_INIT: |
4145 | dev_info(dev, "Partner initialized\n"); | 4196 | dev_info(dev, "Partner initialized\n"); |
4146 | adapter->from_passive_init = true; | 4197 | adapter->from_passive_init = true; |
4198 | adapter->failover_pending = false; | ||
4147 | complete(&adapter->init_done); | 4199 | complete(&adapter->init_done); |
4200 | ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); | ||
4148 | break; | 4201 | break; |
4149 | case IBMVNIC_CRQ_INIT_COMPLETE: | 4202 | case IBMVNIC_CRQ_INIT_COMPLETE: |
4150 | dev_info(dev, "Partner initialization complete\n"); | 4203 | dev_info(dev, "Partner initialization complete\n"); |
@@ -4161,7 +4214,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, | |||
4161 | ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); | 4214 | ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); |
4162 | } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { | 4215 | } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { |
4163 | dev_info(dev, "Backing device failover detected\n"); | 4216 | dev_info(dev, "Backing device failover detected\n"); |
4164 | ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); | 4217 | adapter->failover_pending = true; |
4165 | } else { | 4218 | } else { |
4166 | /* The adapter lost the connection */ | 4219 | /* The adapter lost the connection */ |
4167 | dev_err(dev, "Virtual Adapter failed (rc=%d)\n", | 4220 | dev_err(dev, "Virtual Adapter failed (rc=%d)\n", |
@@ -4461,19 +4514,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) | |||
4461 | u64 old_num_rx_queues, old_num_tx_queues; | 4514 | u64 old_num_rx_queues, old_num_tx_queues; |
4462 | int rc; | 4515 | int rc; |
4463 | 4516 | ||
4464 | if (adapter->resetting && !adapter->wait_for_reset) { | ||
4465 | rc = ibmvnic_reset_crq(adapter); | ||
4466 | if (!rc) | ||
4467 | rc = vio_enable_interrupts(adapter->vdev); | ||
4468 | } else { | ||
4469 | rc = init_crq_queue(adapter); | ||
4470 | } | ||
4471 | |||
4472 | if (rc) { | ||
4473 | dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc); | ||
4474 | return rc; | ||
4475 | } | ||
4476 | |||
4477 | adapter->from_passive_init = false; | 4517 | adapter->from_passive_init = false; |
4478 | 4518 | ||
4479 | old_num_rx_queues = adapter->req_rx_queues; | 4519 | old_num_rx_queues = adapter->req_rx_queues; |
@@ -4498,7 +4538,8 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) | |||
4498 | return -1; | 4538 | return -1; |
4499 | } | 4539 | } |
4500 | 4540 | ||
4501 | if (adapter->resetting && !adapter->wait_for_reset) { | 4541 | if (adapter->resetting && !adapter->wait_for_reset && |
4542 | adapter->reset_reason != VNIC_RESET_MOBILITY) { | ||
4502 | if (adapter->req_rx_queues != old_num_rx_queues || | 4543 | if (adapter->req_rx_queues != old_num_rx_queues || |
4503 | adapter->req_tx_queues != old_num_tx_queues) { | 4544 | adapter->req_tx_queues != old_num_tx_queues) { |
4504 | release_sub_crqs(adapter, 0); | 4545 | release_sub_crqs(adapter, 0); |
@@ -4586,6 +4627,13 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
4586 | adapter->mac_change_pending = false; | 4627 | adapter->mac_change_pending = false; |
4587 | 4628 | ||
4588 | do { | 4629 | do { |
4630 | rc = init_crq_queue(adapter); | ||
4631 | if (rc) { | ||
4632 | dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", | ||
4633 | rc); | ||
4634 | goto ibmvnic_init_fail; | ||
4635 | } | ||
4636 | |||
4589 | rc = ibmvnic_init(adapter); | 4637 | rc = ibmvnic_init(adapter); |
4590 | if (rc && rc != EAGAIN) | 4638 | if (rc && rc != EAGAIN) |
4591 | goto ibmvnic_init_fail; | 4639 | goto ibmvnic_init_fail; |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 89efe700eafe..99c0b58c2c39 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h | |||
@@ -1108,6 +1108,7 @@ struct ibmvnic_adapter { | |||
1108 | bool napi_enabled, from_passive_init; | 1108 | bool napi_enabled, from_passive_init; |
1109 | 1109 | ||
1110 | bool mac_change_pending; | 1110 | bool mac_change_pending; |
1111 | bool failover_pending; | ||
1111 | 1112 | ||
1112 | struct ibmvnic_tunables desired; | 1113 | struct ibmvnic_tunables desired; |
1113 | struct ibmvnic_tunables fallback; | 1114 | struct ibmvnic_tunables fallback; |