about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-02-27 14:31:20 -0500
committerDavid S. Miller <davem@davemloft.net>2018-02-27 14:31:20 -0500
commit63d638012e78ed2eb7bf484c30d2cc3263c3b5a1 (patch)
treee88319524a271f67a16e70deabe59e37dc942ce6
parent51846bfef65951ee6148e03217d31acff8d039a6 (diff)
parent20a8ab744ff799ccedd35aba0d3139782f341bed (diff)
Merge branch 'ibmvnic-Miscellaneous-driver-fixes-and-enhancements'
Thomas Falcon says: ==================== ibmvnic: Miscellaneous driver fixes and enhancements There is not a general theme to this patch set other than that it fixes a few issues with the ibmvnic driver. I will just give a quick summary of what each patch does here. "ibmvnic: Fix TX descriptor tracking again" resolves a race condition introduced in an earlier fix to track outstanding transmit descriptors. This condition can throw off the tracking counter to the point that a transmit queue will halt forever. "ibmvnic: Allocate statistics buffers during probe" allocates queue statistics buffers on device probe to avoid a crash when accessing statistics of an unopened interface. "ibmvnic: Harden TX/RX pool cleaning" includes additional checks to avoid a bad access when cleaning RX and TX buffer pools during a device reset. "ibmvnic: Report queue stops and restarts as debug output" changes TX queue state notifications from informational to debug messages. This information is not necessarily useful to a user and under load can result in a lot of log output. "ibmvnic: Do not attempt to login if RX or TX queues are not allocated" checks that device queues have been allocated successfully before attempting device login. This resolves a panic that could occur if a user attempted to configure a device after a failed reset. Thanks for your attention. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c71
1 file changed, 43 insertions(+), 28 deletions(-)
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5a86a916492c..765407179fdd 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -111,7 +111,7 @@ static int ibmvnic_poll(struct napi_struct *napi, int data);
111static void send_map_query(struct ibmvnic_adapter *adapter); 111static void send_map_query(struct ibmvnic_adapter *adapter);
112static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); 112static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
113static void send_request_unmap(struct ibmvnic_adapter *, u8); 113static void send_request_unmap(struct ibmvnic_adapter *, u8);
114static void send_login(struct ibmvnic_adapter *adapter); 114static int send_login(struct ibmvnic_adapter *adapter);
115static void send_cap_queries(struct ibmvnic_adapter *adapter); 115static void send_cap_queries(struct ibmvnic_adapter *adapter);
116static int init_sub_crqs(struct ibmvnic_adapter *); 116static int init_sub_crqs(struct ibmvnic_adapter *);
117static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 117static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
@@ -809,8 +809,11 @@ static int ibmvnic_login(struct net_device *netdev)
809 } 809 }
810 810
811 reinit_completion(&adapter->init_done); 811 reinit_completion(&adapter->init_done);
812 send_login(adapter); 812 rc = send_login(adapter);
813 if (!wait_for_completion_timeout(&adapter->init_done, 813 if (rc) {
814 dev_err(dev, "Unable to attempt device login\n");
815 return rc;
816 } else if (!wait_for_completion_timeout(&adapter->init_done,
814 timeout)) { 817 timeout)) {
815 dev_err(dev, "Login timeout\n"); 818 dev_err(dev, "Login timeout\n");
816 return -1; 819 return -1;
@@ -845,8 +848,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
845 release_tx_pools(adapter); 848 release_tx_pools(adapter);
846 release_rx_pools(adapter); 849 release_rx_pools(adapter);
847 850
848 release_stats_token(adapter);
849 release_stats_buffers(adapter);
850 release_error_buffers(adapter); 851 release_error_buffers(adapter);
851 release_napi(adapter); 852 release_napi(adapter);
852 release_login_rsp_buffer(adapter); 853 release_login_rsp_buffer(adapter);
@@ -974,14 +975,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
974 if (rc) 975 if (rc)
975 return rc; 976 return rc;
976 977
977 rc = init_stats_buffers(adapter);
978 if (rc)
979 return rc;
980
981 rc = init_stats_token(adapter);
982 if (rc)
983 return rc;
984
985 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); 978 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
986 if (!adapter->vpd) 979 if (!adapter->vpd)
987 return -ENOMEM; 980 return -ENOMEM;
@@ -1091,6 +1084,7 @@ static int ibmvnic_open(struct net_device *netdev)
1091static void clean_rx_pools(struct ibmvnic_adapter *adapter) 1084static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1092{ 1085{
1093 struct ibmvnic_rx_pool *rx_pool; 1086 struct ibmvnic_rx_pool *rx_pool;
1087 struct ibmvnic_rx_buff *rx_buff;
1094 u64 rx_entries; 1088 u64 rx_entries;
1095 int rx_scrqs; 1089 int rx_scrqs;
1096 int i, j; 1090 int i, j;
@@ -1104,14 +1098,15 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1104 /* Free any remaining skbs in the rx buffer pools */ 1098 /* Free any remaining skbs in the rx buffer pools */
1105 for (i = 0; i < rx_scrqs; i++) { 1099 for (i = 0; i < rx_scrqs; i++) {
1106 rx_pool = &adapter->rx_pool[i]; 1100 rx_pool = &adapter->rx_pool[i];
1107 if (!rx_pool) 1101 if (!rx_pool || !rx_pool->rx_buff)
1108 continue; 1102 continue;
1109 1103
1110 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i); 1104 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1111 for (j = 0; j < rx_entries; j++) { 1105 for (j = 0; j < rx_entries; j++) {
1112 if (rx_pool->rx_buff[j].skb) { 1106 rx_buff = &rx_pool->rx_buff[j];
1113 dev_kfree_skb_any(rx_pool->rx_buff[j].skb); 1107 if (rx_buff && rx_buff->skb) {
1114 rx_pool->rx_buff[j].skb = NULL; 1108 dev_kfree_skb_any(rx_buff->skb);
1109 rx_buff->skb = NULL;
1115 } 1110 }
1116 } 1111 }
1117 } 1112 }
@@ -1120,6 +1115,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1120static void clean_tx_pools(struct ibmvnic_adapter *adapter) 1115static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1121{ 1116{
1122 struct ibmvnic_tx_pool *tx_pool; 1117 struct ibmvnic_tx_pool *tx_pool;
1118 struct ibmvnic_tx_buff *tx_buff;
1123 u64 tx_entries; 1119 u64 tx_entries;
1124 int tx_scrqs; 1120 int tx_scrqs;
1125 int i, j; 1121 int i, j;
@@ -1133,14 +1129,15 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1133 /* Free any remaining skbs in the tx buffer pools */ 1129 /* Free any remaining skbs in the tx buffer pools */
1134 for (i = 0; i < tx_scrqs; i++) { 1130 for (i = 0; i < tx_scrqs; i++) {
1135 tx_pool = &adapter->tx_pool[i]; 1131 tx_pool = &adapter->tx_pool[i];
1136 if (!tx_pool) 1132 if (!tx_pool || !tx_pool->tx_buff)
1137 continue; 1133 continue;
1138 1134
1139 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); 1135 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1140 for (j = 0; j < tx_entries; j++) { 1136 for (j = 0; j < tx_entries; j++) {
1141 if (tx_pool->tx_buff[j].skb) { 1137 tx_buff = &tx_pool->tx_buff[j];
1142 dev_kfree_skb_any(tx_pool->tx_buff[j].skb); 1138 if (tx_buff && tx_buff->skb) {
1143 tx_pool->tx_buff[j].skb = NULL; 1139 dev_kfree_skb_any(tx_buff->skb);
1140 tx_buff->skb = NULL;
1144 } 1141 }
1145 } 1142 }
1146 } 1143 }
@@ -1482,6 +1479,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1482 if ((*hdrs >> 7) & 1) { 1479 if ((*hdrs >> 7) & 1) {
1483 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); 1480 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1484 tx_crq.v1.n_crq_elem = num_entries; 1481 tx_crq.v1.n_crq_elem = num_entries;
1482 tx_buff->num_entries = num_entries;
1485 tx_buff->indir_arr[0] = tx_crq; 1483 tx_buff->indir_arr[0] = tx_crq;
1486 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, 1484 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1487 sizeof(tx_buff->indir_arr), 1485 sizeof(tx_buff->indir_arr),
@@ -1500,6 +1498,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1500 (u64)tx_buff->indir_dma, 1498 (u64)tx_buff->indir_dma,
1501 (u64)num_entries); 1499 (u64)num_entries);
1502 } else { 1500 } else {
1501 tx_buff->num_entries = num_entries;
1503 lpar_rc = send_subcrq(adapter, handle_array[queue_num], 1502 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1504 &tx_crq); 1503 &tx_crq);
1505 } 1504 }
@@ -1532,11 +1531,10 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1532 1531
1533 if (atomic_add_return(num_entries, &tx_scrq->used) 1532 if (atomic_add_return(num_entries, &tx_scrq->used)
1534 >= adapter->req_tx_entries_per_subcrq) { 1533 >= adapter->req_tx_entries_per_subcrq) {
1535 netdev_info(netdev, "Stopping queue %d\n", queue_num); 1534 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1536 netif_stop_subqueue(netdev, queue_num); 1535 netif_stop_subqueue(netdev, queue_num);
1537 } 1536 }
1538 1537
1539 tx_buff->num_entries = num_entries;
1540 tx_packets++; 1538 tx_packets++;
1541 tx_bytes += skb->len; 1539 tx_bytes += skb->len;
1542 txq->trans_start = jiffies; 1540 txq->trans_start = jiffies;
@@ -2546,8 +2544,8 @@ restart_loop:
2546 __netif_subqueue_stopped(adapter->netdev, 2544 __netif_subqueue_stopped(adapter->netdev,
2547 scrq->pool_index)) { 2545 scrq->pool_index)) {
2548 netif_wake_subqueue(adapter->netdev, scrq->pool_index); 2546 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2549 netdev_info(adapter->netdev, "Started queue %d\n", 2547 netdev_dbg(adapter->netdev, "Started queue %d\n",
2550 scrq->pool_index); 2548 scrq->pool_index);
2551 } 2549 }
2552 } 2550 }
2553 2551
@@ -3079,7 +3077,7 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3079 strncpy(&vlcd->name, adapter->netdev->name, len); 3077 strncpy(&vlcd->name, adapter->netdev->name, len);
3080} 3078}
3081 3079
3082static void send_login(struct ibmvnic_adapter *adapter) 3080static int send_login(struct ibmvnic_adapter *adapter)
3083{ 3081{
3084 struct ibmvnic_login_rsp_buffer *login_rsp_buffer; 3082 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3085 struct ibmvnic_login_buffer *login_buffer; 3083 struct ibmvnic_login_buffer *login_buffer;
@@ -3095,6 +3093,12 @@ static void send_login(struct ibmvnic_adapter *adapter)
3095 struct vnic_login_client_data *vlcd; 3093 struct vnic_login_client_data *vlcd;
3096 int i; 3094 int i;
3097 3095
3096 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3097 netdev_err(adapter->netdev,
3098 "RX or TX queues are not allocated, device login failed\n");
3099 return -1;
3100 }
3101
3098 release_login_rsp_buffer(adapter); 3102 release_login_rsp_buffer(adapter);
3099 client_data_len = vnic_client_data_len(adapter); 3103 client_data_len = vnic_client_data_len(adapter);
3100 3104
@@ -3192,7 +3196,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
3192 crq.login.len = cpu_to_be32(buffer_size); 3196 crq.login.len = cpu_to_be32(buffer_size);
3193 ibmvnic_send_crq(adapter, &crq); 3197 ibmvnic_send_crq(adapter, &crq);
3194 3198
3195 return; 3199 return 0;
3196 3200
3197buf_rsp_map_failed: 3201buf_rsp_map_failed:
3198 kfree(login_rsp_buffer); 3202 kfree(login_rsp_buffer);
@@ -3201,7 +3205,7 @@ buf_rsp_alloc_failed:
3201buf_map_failed: 3205buf_map_failed:
3202 kfree(login_buffer); 3206 kfree(login_buffer);
3203buf_alloc_failed: 3207buf_alloc_failed:
3204 return; 3208 return -1;
3205} 3209}
3206 3210
3207static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, 3211static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
@@ -4430,6 +4434,14 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4430 release_crq_queue(adapter); 4434 release_crq_queue(adapter);
4431 } 4435 }
4432 4436
4437 rc = init_stats_buffers(adapter);
4438 if (rc)
4439 return rc;
4440
4441 rc = init_stats_token(adapter);
4442 if (rc)
4443 return rc;
4444
4433 return rc; 4445 return rc;
4434} 4446}
4435 4447
@@ -4537,6 +4549,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
4537 release_sub_crqs(adapter, 1); 4549 release_sub_crqs(adapter, 1);
4538 release_crq_queue(adapter); 4550 release_crq_queue(adapter);
4539 4551
4552 release_stats_token(adapter);
4553 release_stats_buffers(adapter);
4554
4540 adapter->state = VNIC_REMOVED; 4555 adapter->state = VNIC_REMOVED;
4541 4556
4542 mutex_unlock(&adapter->reset_lock); 4557 mutex_unlock(&adapter->reset_lock);