 drivers/net/ethernet/amd/xgbe/xgbe-dev.c  | 42 ++++++++++----------
 drivers/net/ethernet/amd/xgbe/xgbe-main.c | 10 +++++++++-
 drivers/net/ethernet/amd/xgbe/xgbe.h      |  3 +++
 3 files changed, 33 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 098a7461db2d..2cdf34f6139f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -247,7 +247,7 @@ static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
 
 	return 0;
@@ -257,7 +257,7 @@ static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
 
 	return 0;
@@ -268,7 +268,7 @@ static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
 
 	return 0;
@@ -279,7 +279,7 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
 
 	return 0;
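
For reference, a minimal standalone sketch of the per-queue MTL write pattern the four hunks above share: loop over the queue count and read-modify-write one field in each queue's copy of a per-queue register. The helper xgmac_mtl_write_bits(), the in-memory register file, and the bit position are hypothetical stand-ins for XGMAC_MTL_IOWRITE_BITS() and real MMIO, not the driver's API.

#include <stdio.h>

static unsigned int mtl_regs[16];	/* fake per-queue MTL register file */

/* Hypothetical stand-in for XGMAC_MTL_IOWRITE_BITS(): read-modify-write
 * a (shift, width) bit field in queue's per-queue register.
 */
static void xgmac_mtl_write_bits(unsigned int queue, unsigned int shift,
				 unsigned int width, unsigned int val)
{
	unsigned int mask = ((1U << width) - 1) << shift;

	mtl_regs[queue] = (mtl_regs[queue] & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	unsigned int rx_q_count = 4;	/* stands in for pdata->rx_q_count */
	unsigned int i;

	/* Same shape as xgbe_config_rsf_mode(): one field write per queue */
	for (i = 0; i < rx_q_count; i++)
		xgmac_mtl_write_bits(i, 5, 1, 1);	/* bit position is made up */

	for (i = 0; i < rx_q_count; i++)
		printf("queue %u RQOMR = 0x%08x\n", i, mtl_regs[i]);

	return 0;
}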
@@ -343,12 +343,12 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 	unsigned int i;
 
 	/* Clear MTL flow control */
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
 
 	/* Clear MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -368,12 +368,12 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 	unsigned int i;
 
 	/* Set MTL flow control */
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
 
 	/* Set MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
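
A sketch of the clamp in these two flow-control hunks: the MAC only has per-queue flow-control registers for a fixed number of queues, so the loop bound is the smaller of the Rx queue count and that limit. The value 8 below is an assumption standing in for XGMAC_MAX_FLOW_CONTROL_QUEUES, and min_uint() stands in for min_t(); neither is taken from this patch.

#include <stdio.h>

#define MAX_FLOW_CONTROL_QUEUES 8	/* assumed hardware limit */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;	/* stands in for min_t(unsigned int, a, b) */
}

int main(void)
{
	unsigned int rx_q_count = 12;	/* stands in for pdata->rx_q_count */
	unsigned int q_count = min_uint(rx_q_count, MAX_FLOW_CONTROL_QUEUES);

	printf("flow control programmed on %u of %u Rx queues\n",
	       q_count, rx_q_count);
	return 0;
}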
@@ -1551,11 +1551,11 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
 {
 	unsigned int i, count;
 
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
 
 	/* Poll Until Poll Condition */
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
+	for (i = 0; i < pdata->tx_q_count; i++) {
 		count = 2000;
 		while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
 							MTL_Q_TQOMR, FTQ))
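
A sketch of the bounded poll in xgbe_flush_tx_queues(): set the self-clearing FTQ (flush) bit on each Tx queue, then spin until hardware clears it or ~2000 iterations elapse. The read_ftq() helper and the fake hardware that clears the bit after three polls are hypothetical; the driver also sleeps between polls, which is elided here.

#include <stdio.h>

static unsigned int ftq_reads_left;	/* fake hw: FTQ clears after N polls */

static unsigned int read_ftq(unsigned int queue)
{
	(void)queue;
	if (ftq_reads_left) {
		ftq_reads_left--;
		return 1;	/* flush still in progress */
	}
	return 0;		/* hardware cleared FTQ */
}

int main(void)
{
	unsigned int tx_q_count = 2;	/* stands in for pdata->tx_q_count */
	unsigned int i, count;

	for (i = 0; i < tx_q_count; i++) {
		ftq_reads_left = 3;
		count = 2000;
		while (count-- && read_ftq(i))
			;	/* the driver delays briefly here */
		printf("queue %u flushed with budget remaining: %u\n", i, count);
	}
	return 0;
}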
@@ -1700,13 +1700,13 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 	unsigned int i;
 
 	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
-						  pdata->hw_feat.tx_q_cnt);
+						  pdata->tx_q_count);
 
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
 	netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
-		      pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
+		      pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -1715,19 +1715,19 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 	unsigned int i;
 
 	fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
-						  pdata->hw_feat.rx_q_cnt);
+						  pdata->rx_q_count);
 
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
 	netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
-		      pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
+		      pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
 {
 	unsigned int i, reg, reg_val;
-	unsigned int q_count = pdata->hw_feat.rx_q_cnt;
+	unsigned int q_count = pdata->rx_q_count;
 
 	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
 	reg = MTL_RQDCM0R;
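
A sketch of the FIFO split implied by the two fifo-size hunks: the total FIFO is divided across the queues and programmed into TQS/RQS in 256-byte units, encoded as (units - 1), which is why the printed byte count is (fifo_size + 1) * 256. The even split below is an approximation; the real xgbe_calculate_per_queue_fifo() applies rounding and hardware limits not shown in this hunk.

#include <stdio.h>

/* Approximate per-queue FIFO encoding: bytes -> 256-byte units minus 1 */
static unsigned int per_queue_fifo(unsigned int fifo_bytes, unsigned int q_count)
{
	unsigned int per_q = fifo_bytes / q_count;	/* bytes per queue */

	return per_q / 256 - 1;				/* TQS/RQS field value */
}

int main(void)
{
	unsigned int fifo_size = per_queue_fifo(16384, 4);

	/* Mirrors the driver's "%d byte fifo per queue" message */
	printf("4 Tx queues, %u byte fifo per queue\n", (fifo_size + 1) * 256);
	return 0;
}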
@@ -1749,7 +1749,7 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 {
 	unsigned int i;
 
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
+	for (i = 0; i < pdata->rx_q_count; i++) {
 		/* Activate flow control when less than 4k left in fifo */
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
 
@@ -2141,7 +2141,7 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
 	}
 
 	/* Enable each Tx queue */
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
 				       MTL_Q_ENABLED);
 
@@ -2158,7 +2158,7 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
 
 	/* Disable each Tx queue */
-	for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
 
 	/* Disable each Tx DMA channel */
@@ -2187,7 +2187,7 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
 
 	/* Enable each Rx queue */
 	reg_val = 0;
-	for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+	for (i = 0; i < pdata->rx_q_count; i++)
 		reg_val |= (0x02 << (i << 1));
 	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
 
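
A sketch of the mask built in xgbe_enable_rx() above: each Rx queue owns a 2-bit field in MAC_RQC0R, and the value 0x02 (queue enabled) is shifted into queue i's field at bit position 2*i. Only the bit arithmetic from the hunk is reproduced; the field semantics come from the XGMAC datasheet and are not shown here.

#include <stdio.h>

int main(void)
{
	unsigned int rx_q_count = 4;	/* stands in for pdata->rx_q_count */
	unsigned int reg_val = 0;
	unsigned int i;

	for (i = 0; i < rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));	/* queue i -> bits [2i+1:2i] */

	printf("MAC_RQC0R = 0x%08x\n", reg_val);	/* 0x000000aa for 4 queues */
	return 0;
}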
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index ca6a6af00f9f..04e6c72eb3c8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -355,9 +355,16 @@ static int xgbe_probe(struct platform_device *pdev)
 	/* Set default configuration data */
 	xgbe_default_config(pdata);
 
-	/* Calculate the number of Tx and Rx rings to be created */
+	/* Calculate the number of Tx and Rx rings to be created
+	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+	 *   the number of Tx queues to the number of Tx channels
+	 *   enabled
+	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
+	 *   number of Rx queues
+	 */
 	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
 				     pdata->hw_feat.tx_ch_cnt);
+	pdata->tx_q_count = pdata->tx_ring_count;
 	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
 	if (ret) {
 		dev_err(dev, "error setting real tx queue count\n");
@@ -367,6 +374,7 @@ static int xgbe_probe(struct platform_device *pdev)
 	pdata->rx_ring_count = min_t(unsigned int,
 				     netif_get_num_default_rss_queues(),
 				     pdata->hw_feat.rx_ch_cnt);
+	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
 	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
 	if (ret) {
 		dev_err(dev, "error setting real rx queue count\n");
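
A sketch of the count derivation this hunk adds to xgbe_probe(): Tx queues track Tx DMA channels 1-to-1 (capped by online CPUs), while the Rx queue count comes straight from the hardware feature register, independent of the Rx ring count. The hardware feature values and CPU/RSS counts below are made-up stubs for num_online_cpus(), netif_get_num_default_rss_queues(), and pdata->hw_feat.

#include <stdio.h>

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;	/* stands in for min_t(unsigned int, ...) */
}

int main(void)
{
	/* Hypothetical hardware feature values and system counts */
	unsigned int hw_tx_ch_cnt = 4, hw_rx_ch_cnt = 4, hw_rx_q_cnt = 8;
	unsigned int online_cpus = 2, default_rss = 8;

	unsigned int tx_ring_count = min_uint(online_cpus, hw_tx_ch_cnt);
	unsigned int tx_q_count = tx_ring_count;	/* 1:1 channel/queue map */

	unsigned int rx_ring_count = min_uint(default_rss, hw_rx_ch_cnt);
	unsigned int rx_q_count = hw_rx_q_cnt;		/* not tied to ring count */

	printf("tx: %u rings / %u queues, rx: %u rings / %u queues\n",
	       tx_ring_count, tx_q_count, rx_ring_count, rx_q_count);
	return 0;
}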
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 8b6ad3e82c34..f011d88d2211 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -590,6 +590,9 @@ struct xgbe_prv_data {
 	unsigned int rx_ring_count;
 	unsigned int rx_desc_count;
 
+	unsigned int tx_q_count;
+	unsigned int rx_q_count;
+
 	/* Tx/Rx common settings */
 	unsigned int pblx8;
 