about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2016-10-22 17:08:07 -0400
committerDavid S. Miller <davem@davemloft.net>2016-10-22 17:08:07 -0400
commitf18a37ff24160caad876a8d5c78f77d3c30efed7 (patch)
treec0df8515b2ea1dc8834a4ec46ca60127b025fc5e
parent396a30cce15d084b2b1a395aa6d515c3d559c674 (diff)
parentfabd545c6d27ac1977fe567c43cd4c72fad04172 (diff)
Merge branch 'qed-fixes'
Sudarsana Reddy Kalluru says:

====================
qed*: fix series.

The patch series contains several minor bug fixes for qed/qede modules.
Please consider applying this to 'net' branch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c99
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c11
-rw-r--r--include/linux/qed/qed_if.h1
6 files changed, 75 insertions, 42 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 130da1c0490b..a4789a93b692 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
1190 if (!dcbx_info) 1190 if (!dcbx_info)
1191 return -ENOMEM; 1191 return -ENOMEM;
1192 1192
1193 memset(dcbx_info, 0, sizeof(*dcbx_info));
1193 rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB); 1194 rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
1194 if (rc) { 1195 if (rc) {
1195 kfree(dcbx_info); 1196 kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
1225 if (!dcbx_info) 1226 if (!dcbx_info)
1226 return NULL; 1227 return NULL;
1227 1228
1229 memset(dcbx_info, 0, sizeof(*dcbx_info));
1228 if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { 1230 if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
1229 kfree(dcbx_info); 1231 kfree(dcbx_info);
1230 return NULL; 1232 return NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 8dc3f4670f64..c418360ba02a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -878,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
878 } 878 }
879 } 879 }
880 880
881 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
881 rc = qed_nic_setup(cdev); 882 rc = qed_nic_setup(cdev);
882 if (rc) 883 if (rc)
883 goto err; 884 goto err;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 28c0e9f42c9e..974689a13337 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -348,12 +348,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
348int qede_txq_has_work(struct qede_tx_queue *txq); 348int qede_txq_has_work(struct qede_tx_queue *txq);
349void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, 349void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
350 u8 count); 350 u8 count);
351void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
351 352
352#define RX_RING_SIZE_POW 13 353#define RX_RING_SIZE_POW 13
353#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) 354#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
354#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) 355#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
355#define NUM_RX_BDS_MIN 128 356#define NUM_RX_BDS_MIN 128
356#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX 357#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
357 358
358#define TX_RING_SIZE_POW 13 359#define TX_RING_SIZE_POW 13
359#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW)) 360#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 25a9b293ee8f..12251a1032d1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -756,6 +756,8 @@ static void qede_get_channels(struct net_device *dev,
756 struct qede_dev *edev = netdev_priv(dev); 756 struct qede_dev *edev = netdev_priv(dev);
757 757
758 channels->max_combined = QEDE_MAX_RSS_CNT(edev); 758 channels->max_combined = QEDE_MAX_RSS_CNT(edev);
759 channels->max_rx = QEDE_MAX_RSS_CNT(edev);
760 channels->max_tx = QEDE_MAX_RSS_CNT(edev);
759 channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx - 761 channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
760 edev->fp_num_rx; 762 edev->fp_num_rx;
761 channels->tx_count = edev->fp_num_tx; 763 channels->tx_count = edev->fp_num_tx;
@@ -820,6 +822,13 @@ static int qede_set_channels(struct net_device *dev,
820 edev->req_queues = count; 822 edev->req_queues = count;
821 edev->req_num_tx = channels->tx_count; 823 edev->req_num_tx = channels->tx_count;
822 edev->req_num_rx = channels->rx_count; 824 edev->req_num_rx = channels->rx_count;
825 /* Reset the indirection table if rx queue count is updated */
826 if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
827 edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
828 memset(&edev->rss_params.rss_ind_table, 0,
829 sizeof(edev->rss_params.rss_ind_table));
830 }
831
823 if (netif_running(dev)) 832 if (netif_running(dev))
824 qede_reload(edev, NULL, NULL); 833 qede_reload(edev, NULL, NULL);
825 834
@@ -1053,6 +1062,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
1053 struct qede_dev *edev = netdev_priv(dev); 1062 struct qede_dev *edev = netdev_priv(dev);
1054 int i; 1063 int i;
1055 1064
1065 if (edev->dev_info.common.num_hwfns > 1) {
1066 DP_INFO(edev,
1067 "RSS configuration is not supported for 100G devices\n");
1068 return -EOPNOTSUPP;
1069 }
1070
1056 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) 1071 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1057 return -EOPNOTSUPP; 1072 return -EOPNOTSUPP;
1058 1073
@@ -1184,8 +1199,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
1184 } 1199 }
1185 1200
1186 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); 1201 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
1187 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), 1202 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
1188 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); 1203 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
1189 txq->sw_tx_cons++; 1204 txq->sw_tx_cons++;
1190 txq->sw_tx_ring[idx].skb = NULL; 1205 txq->sw_tx_ring[idx].skb = NULL;
1191 1206
@@ -1199,8 +1214,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
1199 struct qede_rx_queue *rxq = NULL; 1214 struct qede_rx_queue *rxq = NULL;
1200 struct sw_rx_data *sw_rx_data; 1215 struct sw_rx_data *sw_rx_data;
1201 union eth_rx_cqe *cqe; 1216 union eth_rx_cqe *cqe;
1217 int i, rc = 0;
1202 u8 *data_ptr; 1218 u8 *data_ptr;
1203 int i;
1204 1219
1205 for_each_queue(i) { 1220 for_each_queue(i) {
1206 if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { 1221 if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1219,46 +1234,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
1219 * queue and that the loopback traffic is not IP. 1234 * queue and that the loopback traffic is not IP.
1220 */ 1235 */
1221 for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { 1236 for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
1222 if (qede_has_rx_work(rxq)) 1237 if (!qede_has_rx_work(rxq)) {
1238 usleep_range(100, 200);
1239 continue;
1240 }
1241
1242 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1243 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1244
1245 /* Memory barrier to prevent the CPU from doing speculative
1246 * reads of CQE/BD before reading hw_comp_cons. If the CQE is
1247 * read before it is written by FW, then FW writes CQE and SB,
1248 * and then the CPU reads the hw_comp_cons, it will use an old
1249 * CQE.
1250 */
1251 rmb();
1252
1253 /* Get the CQE from the completion ring */
1254 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1255
1256 /* Get the data from the SW ring */
1257 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1258 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1259 fp_cqe = &cqe->fast_path_regular;
1260 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1261 data_ptr = (u8 *)(page_address(sw_rx_data->data) +
1262 fp_cqe->placement_offset +
1263 sw_rx_data->page_offset);
1264 if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
1265 ether_addr_equal(data_ptr + ETH_ALEN,
1266 edev->ndev->dev_addr)) {
1267 for (i = ETH_HLEN; i < len; i++)
1268 if (data_ptr[i] != (unsigned char)(i & 0xff)) {
1269 rc = -1;
1270 break;
1271 }
1272
1273 qede_recycle_rx_bd_ring(rxq, edev, 1);
1274 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1223 break; 1275 break;
1224 usleep_range(100, 200); 1276 }
1277
1278 DP_INFO(edev, "Not the transmitted packet\n");
1279 qede_recycle_rx_bd_ring(rxq, edev, 1);
1280 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1225 } 1281 }
1226 1282
1227 if (!qede_has_rx_work(rxq)) { 1283 if (i == QEDE_SELFTEST_POLL_COUNT) {
1228 DP_NOTICE(edev, "Failed to receive the traffic\n"); 1284 DP_NOTICE(edev, "Failed to receive the traffic\n");
1229 return -1; 1285 return -1;
1230 } 1286 }
1231 1287
1232 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); 1288 qede_update_rx_prod(edev, rxq);
1233 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1234 1289
1235 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 1290 return rc;
1236 * / BD before reading hw_comp_cons. If the CQE is read before it is
1237 * written by FW, then FW writes CQE and SB, and then the CPU reads the
1238 * hw_comp_cons, it will use an old CQE.
1239 */
1240 rmb();
1241
1242 /* Get the CQE from the completion ring */
1243 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1244
1245 /* Get the data from the SW ring */
1246 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1247 sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
1248 fp_cqe = &cqe->fast_path_regular;
1249 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1250 data_ptr = (u8 *)(page_address(sw_rx_data->data) +
1251 fp_cqe->placement_offset + sw_rx_data->page_offset);
1252 for (i = ETH_HLEN; i < len; i++)
1253 if (data_ptr[i] != (unsigned char)(i & 0xff)) {
1254 DP_NOTICE(edev, "Loopback test failed\n");
1255 qede_recycle_rx_bd_ring(rxq, edev, 1);
1256 return -1;
1257 }
1258
1259 qede_recycle_rx_bd_ring(rxq, edev, 1);
1260
1261 return 0;
1262} 1291}
1263 1292
1264static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) 1293static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 343038ca047d..444b271059b2 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -313,8 +313,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
313 split_bd_len = BD_UNMAP_LEN(split); 313 split_bd_len = BD_UNMAP_LEN(split);
314 bds_consumed++; 314 bds_consumed++;
315 } 315 }
316 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), 316 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
317 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); 317 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
318 318
319 /* Unmap the data of the skb frags */ 319 /* Unmap the data of the skb frags */
320 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { 320 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -359,8 +359,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
359 nbd--; 359 nbd--;
360 } 360 }
361 361
362 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), 362 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
363 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE); 363 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
364 364
365 /* Unmap the data of the skb frags */ 365 /* Unmap the data of the skb frags */
366 for (i = 0; i < nbd; i++) { 366 for (i = 0; i < nbd; i++) {
@@ -943,8 +943,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
943 return 0; 943 return 0;
944} 944}
945 945
946static inline void qede_update_rx_prod(struct qede_dev *edev, 946void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
947 struct qede_rx_queue *rxq)
948{ 947{
949 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); 948 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
950 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); 949 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f9ae903bbb84..8978a60371f4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
146#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) 146#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
147 147
148#define QED_COALESCE_MAX 0xFF 148#define QED_COALESCE_MAX 0xFF
149#define QED_DEFAULT_RX_USECS 12
149 150
150/* forward */ 151/* forward */
151struct qed_dev; 152struct qed_dev;