Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-dev.c')
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 110
 1 file changed, 57 insertions(+), 53 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 21d9497518fd..a4473d8ff4fa 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
 		return 0;
 
-	DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+		  enable ? "entering" : "leaving");
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
 
 	return 0;
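
These DBGPR() calls become netif_dbg() calls, which gate on the interface's msg_enable bitmask (settable via ethtool) instead of a compile-time debug macro. A minimal userspace sketch of that gating, assuming the standard NETIF_MSG_DRV flag value; the struct and macro names below are stand-ins, not the driver's:

    #include <stdio.h>

    #define NETIF_MSG_DRV	0x0001	/* mirrors the kernel flag value */

    struct fake_priv {
            unsigned int msg_enable;	/* set via ethtool msglvl in the kernel */
    };

    /* Stand-in for netif_dbg(priv, drv, dev, fmt, ...): print only when
     * the corresponding message class is enabled at runtime.
     */
    #define fake_netif_dbg(priv, fmt, ...)			\
            do {						\
                    if ((priv)->msg_enable & NETIF_MSG_DRV)	\
                            printf(fmt, ##__VA_ARGS__);		\
            } while (0)

    int main(void)
    {
            struct fake_priv priv = { .msg_enable = NETIF_MSG_DRV };

            fake_netif_dbg(&priv, "%s promiscuous mode\n", "entering");

            priv.msg_enable = 0;	/* message compiles in but stays silent */
            fake_netif_dbg(&priv, "%s promiscuous mode\n", "leaving");

            return 0;
    }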
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
 		return 0;
 
-	DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+		  enable ? "entering" : "leaving");
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
 
 	return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
 		mac_addr[0] = ha->addr[4];
 		mac_addr[1] = ha->addr[5];
 
-		DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
-		      *mac_reg);
+		netif_dbg(pdata, drv, pdata->netdev,
+			  "adding mac address %pM at %#x\n",
+			  ha->addr, *mac_reg);
 
 		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
 	}
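
The surrounding code packs a 6-byte MAC address into a 32-bit register pair: bytes 0-3 into the low register, bytes 4-5 into the high register along with the AE (address enable) bit. A standalone sketch of that packing; the bit position of AE is an assumption here, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 6-byte MAC address the way MACA1LR/MACA1HR expect it:
     * bytes 0-3 -> low register, bytes 4-5 -> high register. The
     * address-enable bit at bit 31 is an assumption for illustration.
     */
    static void pack_mac(const uint8_t addr[6], uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                  ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
            *hi = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
            *hi |= 1u << 31;	/* AE: address enable */
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
            uint32_t lo, hi;

            pack_mac(mac, &lo, &hi);
            printf("MACA1LR=%#010x MACA1HR=%#010x\n",
                   (unsigned int)lo, (unsigned int)hi);
            return 0;
    }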
@@ -907,23 +910,6 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 	else
 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
 
-	/* If the PCS is changing modes, match the MAC speed to it */
-	if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
-	    ((mmd_address & 0xffff) == MDIO_CTRL2)) {
-		struct phy_device *phydev = pdata->phydev;
-
-		if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
-			/* KX mode */
-			if (phydev->supported & SUPPORTED_1000baseKX_Full)
-				xgbe_set_gmii_speed(pdata);
-			else
-				xgbe_set_gmii_2500_speed(pdata);
-		} else {
-			/* KR mode */
-			xgbe_set_xgmii_speed(pdata);
-		}
-	}
-
 	/* The PCS registers are accessed using mmio. The underlying APB3
 	 * management interface uses indirect addressing to access the MMD
 	 * register sets. This requires accessing of the PCS register in two
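
The deleted block matched the MAC speed to the PCS whenever MDIO_CTRL2 was written (KR vs. KX mode); with that policy gone, this function is pure register plumbing. The mmd_address it builds packs the MMD device in the upper 16 bits and the register in the lower 16, which the APB3 interface consumes in two stages as the retained comment describes. A small sketch of the composition and split; the 8-bit window width mirrors this era of the driver and is otherwise an assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Compose the indirect MMD address: device in the upper 16 bits,
     * register in the lower 16, as in xgbe_write_mmd_regs() above.
     */
    static uint32_t mmd_address(uint16_t mmd, uint16_t reg)
    {
            return ((uint32_t)mmd << 16) | reg;
    }

    int main(void)
    {
            /* MDIO_MMD_PCS is device 3 in the MDIO register space */
            uint32_t addr = mmd_address(3, 0x0007 /* MDIO_CTRL2 */);

            /* Two-stage access: the upper bits select a register window,
             * the low 8 bits index into it (window width assumed here).
             */
            printf("select=%#x offset=%#x\n", addr >> 8, addr & 0xff);
            return 0;
    }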
@@ -1124,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
 	unsigned int rx_usecs = pdata->rx_usecs;
 	unsigned int rx_frames = pdata->rx_frames;
 	unsigned int inte;
+	dma_addr_t hdr_dma, buf_dma;
 
 	if (!rx_usecs && !rx_frames) {
 		/* No coalescing, interrupt for every descriptor */
@@ -1143,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
 	 * Set buffer 2 (hi) address to buffer dma address (hi) and
 	 * set control bits OWN and INTE
 	 */
-	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
-	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
-	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
-	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
 
 	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
 
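
The switch from a single rx.hdr.dma field to dma_base + dma_off reflects Rx buffers carved out of shared page allocations: the descriptor needs the bus address of the fragment, not of the page. The desc0/desc1 (and desc2/desc3) pairs are a plain 64-bit decomposition; a standalone sketch with illustrative addresses (cpu_to_le32() is dropped since this runs in userspace):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;

    static uint32_t lower_32_bits(dma_addr_t a) { return (uint32_t)a; }
    static uint32_t upper_32_bits(dma_addr_t a) { return (uint32_t)(a >> 32); }

    int main(void)
    {
            /* A fragment at offset 0x800 within a page-based allocation */
            dma_addr_t dma_base = 0x1ffff0000ULL;
            dma_addr_t dma_off = 0x800;
            dma_addr_t hdr_dma = dma_base + dma_off;

            /* Descriptor words take the address as two 32-bit halves */
            printf("desc0=%#010x desc1=%#010x\n",
                   (unsigned int)lower_32_bits(hdr_dma),
                   (unsigned int)upper_32_bits(hdr_dma));
            return 0;
    }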
@@ -1322,7 +1311,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
-			DBGPR(" TC%u using SP\n", i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using SP\n", i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_SP);
 			break;
@@ -1330,7 +1320,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 			weight = total_weight * ets->tc_tx_bw[i] / 100;
 			weight = clamp(weight, min_weight, total_weight);
 
-			DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using DWRR (weight %u)\n", i, weight);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_ETS);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
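
Each ETS class's DWRR weight is its bandwidth percentage of a total weight, clamped to a legal range; in the driver the totals derive from the MTU and traffic-class count. A sketch of the arithmetic with illustrative totals:

    #include <stdio.h>

    static unsigned int clamp_uint(unsigned int v, unsigned int lo,
                                   unsigned int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            /* Illustrative totals; the driver derives total_weight from
             * the MTU and traffic-class count, and min_weight from that.
             */
            unsigned int total_weight = 1500 * 8, min_weight = 120;
            unsigned int tc_tx_bw[] = { 50, 30, 20, 0 };	/* percent per TC */

            for (unsigned int i = 0; i < 4; i++) {
                    unsigned int weight = total_weight * tc_tx_bw[i] / 100;

                    /* a 0% class still gets min_weight after the clamp */
                    weight = clamp_uint(weight, min_weight, total_weight);
                    printf("TC%u using DWRR (weight %u)\n", i, weight);
            }
            return 0;
    }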
@@ -1359,7 +1350,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
 		}
 		mask &= 0xff;
 
-		DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
+		netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+			  tc, mask);
 		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
 		reg_val = XGMAC_IOREAD(pdata, reg);
 
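
The register arithmetic after the message packs several 8-bit per-TC PFC masks into each 32-bit MTL_TCPMnR register. A sketch with illustrative constants (the real MTL_TCPM0R/MTL_TCPM_INC/MTL_TCPM_TC_PER_REG values live in the driver's headers):

    #include <stdio.h>

    /* Illustrative stand-ins for the driver's register constants */
    #define TCPM0R		0x10c0
    #define TCPM_INC	4
    #define TC_PER_REG	4

    int main(void)
    {
            for (unsigned int tc = 0; tc < 8; tc++) {
                    /* which register this TC's mask lands in ... */
                    unsigned int reg = TCPM0R + TCPM_INC * (tc / TC_PER_REG);
                    /* ... and which byte lane within that register */
                    unsigned int shift = 8 * (tc % TC_PER_REG);

                    printf("TC%u -> reg %#x, bits %u..%u\n",
                           tc, reg, shift, shift + 7);
            }
            return 0;
    }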
@@ -1457,8 +1449,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	/* Create a context descriptor if this is a TSO packet */
 	if (tso_context || vlan_context) {
 		if (tso_context) {
-			DBGPR(" TSO context descriptor, mss=%u\n",
-			      packet->mss);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "TSO context descriptor, mss=%u\n",
+				  packet->mss);
 
 			/* Set the MSS size */
 			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1469,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 		}
 
 		if (vlan_context) {
-			DBGPR(" VLAN context descriptor, ctag=%u\n",
-			      packet->vlan_ctag);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "VLAN context descriptor, ctag=%u\n",
+				  packet->vlan_ctag);
 
 			/* Mark it as a CONTEXT descriptor */
 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1533,6 +1527,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 				  packet->tcp_payload_len);
 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
 				  packet->tcp_header_len / 4);
+
+		pdata->ext_stats.tx_tso_packets++;
 	} else {
 		/* Enable CRC and Pad Insertion */
 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
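
tx_tso_packets is a driver-private counter of the kind typically reported through ethtool -S; the layout below is a guess for illustration, the real ext_stats structure lives in the driver's headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative driver-private counters; not the driver's actual
     * struct, which is defined in xgbe.h.
     */
    struct ext_stats {
            uint64_t tx_tso_packets;
            uint64_t rx_split_header_packets;
    };

    int main(void)
    {
            struct ext_stats stats = { 0 };

            stats.tx_tso_packets++;	/* one TSO frame queued */
            printf("tx_tso_packets: %llu\n",
                   (unsigned long long)stats.tx_tso_packets);
            return 0;
    }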
@@ -1594,9 +1590,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	rdesc = rdata->rdesc;
 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-	xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+	if (netif_msg_tx_queued(pdata))
+		xgbe_dump_tx_desc(pdata, ring, start_index,
+				  packet->rdesc_count, 1);
 
 	/* Make sure ownership is written to the descriptor */
 	dma_wmb();
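
Descriptor dumps previously required rebuilding the driver with XGMAC_ENABLE_TX_DESC_DUMP defined; gating on netif_msg_tx_queued() makes the same dump reachable at runtime from a stock build. A mock of the two gating styles side by side, assuming the standard NETIF_MSG_TX_QUEUED flag value (the functions here are stand-ins):

    #include <stdio.h>

    #define NETIF_MSG_TX_QUEUED	0x0100	/* mirrors the kernel flag value */

    struct fake_priv { unsigned int msg_enable; };

    static void dump_tx_desc(int index) { printf("desc[%d] ...\n", index); }

    int main(void)
    {
            struct fake_priv pdata = { .msg_enable = NETIF_MSG_TX_QUEUED };

    #ifdef ENABLE_TX_DESC_DUMP		/* old style: decided at build time */
            dump_tx_desc(0);
    #endif
            /* new style: decided at run time, per interface */
            if (pdata.msg_enable & NETIF_MSG_TX_QUEUED)
                    dump_tx_desc(0);

            return 0;
    }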
@@ -1618,11 +1614,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 
 static int xgbe_dev_read(struct xgbe_channel *channel)
 {
+	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_packet_data *packet = &ring->packet_data;
-	struct net_device *netdev = channel->pdata->netdev;
+	struct net_device *netdev = pdata->netdev;
 	unsigned int err, etlt, l34t;
 
 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1637,9 +1634,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	/* Make sure descriptor fields are read after reading the OWN bit */
 	dma_rmb();
 
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
-	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+	if (netif_msg_rx_status(pdata))
+		xgbe_dump_rx_desc(pdata, ring, ring->cur);
 
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
 		/* Timestamp Context Descriptor */
@@ -1661,9 +1657,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 				       CONTEXT_NEXT, 1);
 
 	/* Get the header length */
-	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
 		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
 						      RX_NORMAL_DESC2, HL);
+		if (rdata->rx.hdr_len)
+			pdata->ext_stats.rx_split_header_packets++;
+	}
 
 	/* Get the RSS hash */
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
@@ -1700,14 +1699,14 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 			       INCOMPLETE, 0);
 
 	/* Set checksum done indicator as appropriate */
-	if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+	if (netdev->features & NETIF_F_RXCSUM)
 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 			       CSUM_DONE, 1);
 
 	/* Check for errors (only valid in last descriptor) */
 	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
 	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
-	DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
 
 	if (!err || !etlt) {
 		/* No error if err is 0 or etlt is 0 */
@@ -1718,7 +1717,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
 							      RX_NORMAL_DESC0,
 							      OVT);
-			DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+				  packet->vlan_ctag);
 		}
 	} else {
 		if ((etlt == 0x05) || (etlt == 0x06))
@@ -2026,9 +2026,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
-	netdev_notice(pdata->netdev,
-		      "%d Tx hardware queues, %d byte fifo per queue\n",
-		      pdata->tx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Tx hardware queues, %d byte fifo per queue\n",
+		   pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
2034 | static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) | 2034 | static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) |
@@ -2042,9 +2042,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) | |||
2042 | for (i = 0; i < pdata->rx_q_count; i++) | 2042 | for (i = 0; i < pdata->rx_q_count; i++) |
2043 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); | 2043 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); |
2044 | 2044 | ||
2045 | netdev_notice(pdata->netdev, | 2045 | netif_info(pdata, drv, pdata->netdev, |
2046 | "%d Rx hardware queues, %d byte fifo per queue\n", | 2046 | "%d Rx hardware queues, %d byte fifo per queue\n", |
2047 | pdata->rx_q_count, ((fifo_size + 1) * 256)); | 2047 | pdata->rx_q_count, ((fifo_size + 1) * 256)); |
2048 | } | 2048 | } |
2049 | 2049 | ||
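
In both fifo-size messages, the TQS/RQS fields encode the per-queue FIFO size in 256-byte units minus one, hence the ((fifo_size + 1) * 256) byte figure. A quick check of the encoding:

    #include <stdio.h>

    int main(void)
    {
            /* TQS/RQS hold (size / 256) - 1; recover bytes per queue
             * from the encoded field, e.g. for an 8 KB per-queue FIFO.
             */
            unsigned int fifo_size = 31;	/* encoded: (8192 / 256) - 1 */
            unsigned int bytes = (fifo_size + 1) * 256;

            printf("%u byte fifo per queue\n", bytes);	/* 8192 */
            return 0;
    }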
2050 | static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) | 2050 | static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) |
@@ -2063,14 +2063,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) | |||
2063 | 2063 | ||
2064 | for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { | 2064 | for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { |
2065 | for (j = 0; j < qptc; j++) { | 2065 | for (j = 0; j < qptc; j++) { |
2066 | DBGPR(" TXq%u mapped to TC%u\n", queue, i); | 2066 | netif_dbg(pdata, drv, pdata->netdev, |
2067 | "TXq%u mapped to TC%u\n", queue, i); | ||
2067 | XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, | 2068 | XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, |
2068 | Q2TCMAP, i); | 2069 | Q2TCMAP, i); |
2069 | pdata->q2tc_map[queue++] = i; | 2070 | pdata->q2tc_map[queue++] = i; |
2070 | } | 2071 | } |
2071 | 2072 | ||
2072 | if (i < qptc_extra) { | 2073 | if (i < qptc_extra) { |
2073 | DBGPR(" TXq%u mapped to TC%u\n", queue, i); | 2074 | netif_dbg(pdata, drv, pdata->netdev, |
2075 | "TXq%u mapped to TC%u\n", queue, i); | ||
2074 | XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, | 2076 | XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, |
2075 | Q2TCMAP, i); | 2077 | Q2TCMAP, i); |
2076 | pdata->q2tc_map[queue++] = i; | 2078 | pdata->q2tc_map[queue++] = i; |
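
The mapping loop distributes the Tx queues over the traffic classes: every class gets qptc = q_count / tc_cnt queues and the first q_count % tc_cnt classes absorb one extra (qptc_extra). A standalone sketch of that distribution with illustrative counts:

    #include <stdio.h>

    int main(void)
    {
            unsigned int q_count = 10, tc_cnt = 4;
            unsigned int qptc = q_count / tc_cnt;		/* 2 */
            unsigned int qptc_extra = q_count % tc_cnt;	/* 2 */
            unsigned int queue = 0;

            for (unsigned int i = 0; i < tc_cnt; i++) {
                    for (unsigned int j = 0; j < qptc; j++)
                            printf("TXq%u mapped to TC%u\n", queue++, i);
                    /* the first qptc_extra classes take one extra queue */
                    if (i < qptc_extra)
                            printf("TXq%u mapped to TC%u\n", queue++, i);
            }
            return 0;
    }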
@@ -2088,13 +2090,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 	for (i = 0, prio = 0; i < prio_queues;) {
 		mask = 0;
 		for (j = 0; j < ppq; j++) {
-			DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
 
 		if (i < ppq_extra) {
-			DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
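
The priority loop distributes the 802.1p priorities over the Rx queues the same way, additionally accumulating a per-queue priority bitmask that the code following this hunk writes to hardware. A sketch, assuming the usual eight priorities:

    #include <stdio.h>

    int main(void)
    {
            unsigned int prio_queues = 3;
            unsigned int ppq = 8 / prio_queues;		/* 2 */
            unsigned int ppq_extra = 8 % prio_queues;	/* 2 */
            unsigned int prio = 0;

            for (unsigned int i = 0; i < prio_queues; i++) {
                    unsigned int mask = 0;

                    for (unsigned int j = 0; j < ppq; j++) {
                            printf("PRIO%u mapped to RXq%u\n", prio, i);
                            mask |= 1 << prio++;
                    }
                    if (i < ppq_extra) {
                            printf("PRIO%u mapped to RXq%u\n", prio, i);
                            mask |= 1 << prio++;
                    }
                    /* this mask is what would be written per queue */
                    printf("RXq%u mask=%#x\n", i, mask);
            }
            return 0;
    }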