path: root/drivers/net/atlx/atl1.c
Diffstat (limited to 'drivers/net/atlx/atl1.c')
-rw-r--r--  drivers/net/atlx/atl1.c  155
1 file changed, 110 insertions, 45 deletions
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 99298222c8f6..51eca233e1f1 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -100,6 +100,13 @@ static const struct pci_device_id atl1_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
 
+static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
+
 /*
  * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
  * @adapter: board private structure to initialize
@@ -217,7 +224,9 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
 	size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
 	tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
 	if (unlikely(!tpd_ring->buffer_info)) {
-		dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size);
+		if (netif_msg_drv(adapter))
+			dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
+				size);
 		goto err_nomem;
 	}
 	rfd_ring->buffer_info =
@@ -239,7 +248,8 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
 	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
 		&ring_header->dma);
 	if (unlikely(!ring_header->desc)) {
-		dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
+		if (netif_msg_drv(adapter))
+			dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
 		goto err_nomem;
 	}
 
@@ -472,7 +482,8 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
 		/* link down */
 		if (netif_carrier_ok(netdev)) {
 			/* old link state: Up */
-			dev_info(&adapter->pdev->dev, "link is down\n");
+			if (netif_msg_link(adapter))
+				dev_info(&adapter->pdev->dev, "link is down\n");
 			adapter->link_speed = SPEED_0;
 			netif_carrier_off(netdev);
 			netif_stop_queue(netdev);
@@ -515,11 +526,12 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
 			adapter->link_speed = speed;
 			adapter->link_duplex = duplex;
 			atl1_setup_mac_ctrl(adapter);
-			dev_info(&adapter->pdev->dev,
-				"%s link is up %d Mbps %s\n",
-				netdev->name, adapter->link_speed,
-				adapter->link_duplex == FULL_DUPLEX ?
-				"full duplex" : "half duplex");
+			if (netif_msg_link(adapter))
+				dev_info(&adapter->pdev->dev,
+					"%s link is up %d Mbps %s\n",
+					netdev->name, adapter->link_speed,
+					adapter->link_duplex == FULL_DUPLEX ?
+					"full duplex" : "half duplex");
 		}
 		if (!netif_carrier_ok(netdev)) {
 			/* Link down -> Up */
@@ -583,7 +595,8 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
 
 	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
-		dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+		if (netif_msg_link(adapter))
+			dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
 		return -EINVAL;
 	}
 
@@ -997,8 +1010,9 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 		if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
 				ERR_FLAG_CODE | ERR_FLAG_OV)) {
 			adapter->hw_csum_err++;
-			dev_printk(KERN_DEBUG, &pdev->dev,
-				"rx checksum error\n");
+			if (netif_msg_rx_err(adapter))
+				dev_printk(KERN_DEBUG, &pdev->dev,
+					"rx checksum error\n");
 			return;
 		}
 	}
@@ -1017,9 +1031,10 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
 	}
 
 	/* IPv4, but hardware thinks its checksum is wrong */
-	dev_printk(KERN_DEBUG, &pdev->dev,
-		"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
-		rrd->pkt_flg, rrd->err_flg);
+	if (netif_msg_rx_err(adapter))
+		dev_printk(KERN_DEBUG, &pdev->dev,
+			"hw csum wrong, pkt_flag:%x, err_flag:%x\n",
+			rrd->pkt_flg, rrd->err_flg);
 	skb->ip_summed = CHECKSUM_COMPLETE;
 	skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
 	adapter->hw_csum_err++;
@@ -1133,14 +1148,17 @@ chk_rrd:
 		/* rrd seems to be bad */
 		if (unlikely(i-- > 0)) {
 			/* rrd may not be DMAed completely */
-			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-				"incomplete RRD DMA transfer\n");
+			if (netif_msg_rx_err(adapter))
+				dev_printk(KERN_DEBUG,
+					&adapter->pdev->dev,
+					"unexpected RRD count\n");
 			udelay(1);
 			goto chk_rrd;
 		}
 		/* bad rrd */
-		dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-			"bad RRD\n");
+		if (netif_msg_rx_err(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"bad RRD\n");
 		/* see if update RFD index */
 		if (rrd->num_buf > 1)
 			atl1_update_rfd_index(adapter, rrd);
@@ -1351,8 +1369,9 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
 		cso = css + (u8) skb->csum_offset;
 		if (unlikely(css & 0x1)) {
 			/* L1 hardware requires an even number here */
-			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-				"payload offset not an even number\n");
+			if (netif_msg_tx_err(adapter))
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"payload offset not an even number\n");
 			return -1;
 		}
 		ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
@@ -1573,7 +1592,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	if (!spin_trylock_irqsave(&adapter->lock, flags)) {
 		/* Can't get lock - tell upper layer to requeue */
-		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n");
+		if (netif_msg_tx_queued(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"tx locked\n");
 		return NETDEV_TX_LOCKED;
 	}
 
@@ -1581,7 +1602,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		/* not enough descriptors */
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->lock, flags);
-		dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n");
+		if (netif_msg_tx_queued(adapter))
+			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+				"tx busy\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1657,8 +1680,9 @@ static irqreturn_t atl1_intr(int irq, void *data)
 
 		/* check if PCIE PHY Link down */
 		if (status & ISR_PHY_LINKDOWN) {
-			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-				"pcie phy link down %x\n", status);
+			if (netif_msg_intr(adapter))
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"pcie phy link down %x\n", status);
 			if (netif_running(adapter->netdev)) { /* reset MAC */
 				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
 				schedule_work(&adapter->pcie_dma_to_rst_task);
@@ -1668,9 +1692,10 @@ static irqreturn_t atl1_intr(int irq, void *data)
 
 		/* check if DMA read/write error ? */
 		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
-			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-				"pcie DMA r/w error (status = 0x%x)\n",
-				status);
+			if (netif_msg_intr(adapter))
+				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
+					"pcie DMA r/w error (status = 0x%x)\n",
+					status);
 			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
 			schedule_work(&adapter->pcie_dma_to_rst_task);
 			return IRQ_HANDLED;
@@ -1693,8 +1718,11 @@ static irqreturn_t atl1_intr(int irq, void *data)
 			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
 				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
 				ISR_HOST_RRD_OV))
-				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
-					"rx exception, ISR = 0x%x\n", status);
+				if (netif_msg_intr(adapter))
+					dev_printk(KERN_DEBUG,
+						&adapter->pdev->dev,
+						"rx exception, ISR = 0x%x\n",
+						status);
 			atl1_intr_rx(adapter);
 		}
 
@@ -1791,8 +1819,9 @@ s32 atl1_up(struct atl1_adapter *adapter)
 
 	err = pci_enable_msi(adapter->pdev);
 	if (err) {
-		dev_info(&adapter->pdev->dev,
-			"Unable to enable MSI: %d\n", err);
+		if (netif_msg_ifup(adapter))
+			dev_info(&adapter->pdev->dev,
+				"Unable to enable MSI: %d\n", err);
 		irq_flags |= IRQF_SHARED;
 	}
 
@@ -2061,6 +2090,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 	adapter->hw.back = adapter;
+	adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);
 
 	adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
 	if (!adapter->hw.hw_addr) {
@@ -2070,7 +2100,8 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
 	/* get device revision number */
 	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
 		(REG_MASTER_CTRL + 2));
-	dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
+	if (netif_msg_probe(adapter))
+		dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
 
 	/* set default ring resource counts */
 	adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
@@ -2390,7 +2421,9 @@ static int atl1_set_settings(struct net_device *netdev,
 	u16 old_media_type = hw->media_type;
 
 	if (netif_running(adapter->netdev)) {
-		dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
+		if (netif_msg_link(adapter))
+			dev_dbg(&adapter->pdev->dev,
+				"ethtool shutting down adapter\n");
 		atl1_down(adapter);
 	}
 
@@ -2399,8 +2432,9 @@ static int atl1_set_settings(struct net_device *netdev,
 	else {
 		if (ecmd->speed == SPEED_1000) {
 			if (ecmd->duplex != DUPLEX_FULL) {
-				dev_warn(&adapter->pdev->dev,
-					"can't force to 1000M half duplex\n");
+				if (netif_msg_link(adapter))
+					dev_warn(&adapter->pdev->dev,
+						"1000M half is invalid\n");
 				ret_val = -EINVAL;
 				goto exit_sset;
 			}
@@ -2438,8 +2472,9 @@ static int atl1_set_settings(struct net_device *netdev,
 	}
 	if (atl1_phy_setup_autoneg_adv(hw)) {
 		ret_val = -EINVAL;
-		dev_warn(&adapter->pdev->dev,
-			"invalid ethtool speed/duplex setting\n");
+		if (netif_msg_link(adapter))
+			dev_warn(&adapter->pdev->dev,
+				"invalid ethtool speed/duplex setting\n");
 		goto exit_sset;
 	}
 	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
@@ -2471,10 +2506,14 @@ exit_sset:
 		hw->media_type = old_media_type;
 
 	if (netif_running(adapter->netdev)) {
-		dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
+		if (netif_msg_link(adapter))
+			dev_dbg(&adapter->pdev->dev,
+				"ethtool starting adapter\n");
 		atl1_up(adapter);
 	} else if (!ret_val) {
-		dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
+		if (netif_msg_link(adapter))
+			dev_dbg(&adapter->pdev->dev,
+				"ethtool resetting adapter\n");
 		atl1_reset(adapter);
 	}
 	return ret_val;
@@ -2531,6 +2570,18 @@ static int atl1_set_wol(struct net_device *netdev,
 	return 0;
 }
 
+static u32 atl1_get_msglevel(struct net_device *netdev)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void atl1_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = value;
+}
+
 static int atl1_get_regs_len(struct net_device *netdev)
 {
 	return ATL1_REG_COUNT * sizeof(u32);
@@ -2772,6 +2823,8 @@ const struct ethtool_ops atl1_ethtool_ops = {
 	.get_drvinfo = atl1_get_drvinfo,
 	.get_wol = atl1_get_wol,
 	.set_wol = atl1_set_wol,
+	.get_msglevel = atl1_get_msglevel,
+	.set_msglevel = atl1_set_msglevel,
 	.get_regs_len = atl1_get_regs_len,
 	.get_regs = atl1_get_regs,
 	.get_ringparam = atl1_get_ringparam,
@@ -2797,6 +2850,7 @@ const struct ethtool_ops atl1_ethtool_ops = {
 s32 atl1_reset_hw(struct atl1_hw *hw)
 {
 	struct pci_dev *pdev = hw->back->pdev;
+	struct atl1_adapter *adapter = hw->back;
 	u32 icr;
 	int i;
 
@@ -2836,7 +2890,8 @@ s32 atl1_reset_hw(struct atl1_hw *hw)
 	}
 
 	if (icr) {
-		dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
+		if (netif_msg_hw(adapter))
+			dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
 		return icr;
 	}
 
@@ -3205,6 +3260,7 @@ s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
 static s32 atl1_phy_reset(struct atl1_hw *hw)
 {
 	struct pci_dev *pdev = hw->back->pdev;
+	struct atl1_adapter *adapter = hw->back;
 	s32 ret_val;
 	u16 phy_data;
 
@@ -3237,7 +3293,8 @@ static s32 atl1_phy_reset(struct atl1_hw *hw)
 		u32 val;
 		int i;
 		/* pcie serdes link may be down! */
-		dev_dbg(&pdev->dev, "pcie phy link down\n");
+		if (netif_msg_hw(adapter))
+			dev_dbg(&pdev->dev, "pcie phy link down\n");
 
 		for (i = 0; i < 25; i++) {
 			msleep(1);
@@ -3247,7 +3304,9 @@ static s32 atl1_phy_reset(struct atl1_hw *hw)
 		}
 
 		if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
-			dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
+			if (netif_msg_hw(adapter))
+				dev_warn(&pdev->dev,
+					"pcie link down at least 25ms\n");
 			return ret_val;
 		}
 	}
@@ -3338,6 +3397,7 @@ s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
 static s32 atl1_setup_link(struct atl1_hw *hw)
 {
 	struct pci_dev *pdev = hw->back->pdev;
+	struct atl1_adapter *adapter = hw->back;
 	s32 ret_val;
 
 	/*
@@ -3348,13 +3408,16 @@ static s32 atl1_setup_link(struct atl1_hw *hw)
 	 */
 	ret_val = atl1_phy_setup_autoneg_adv(hw);
 	if (ret_val) {
-		dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
+		if (netif_msg_link(adapter))
+			dev_dbg(&pdev->dev,
+				"error setting up autonegotiation\n");
 		return ret_val;
 	}
 	/* SW.Reset , En-Auto-Neg if needed */
 	ret_val = atl1_phy_reset(hw);
 	if (ret_val) {
-		dev_dbg(&pdev->dev, "error resetting phy\n");
+		if (netif_msg_link(adapter))
+			dev_dbg(&pdev->dev, "error resetting phy\n");
 		return ret_val;
 	}
 	hw->phy_configured = true;
@@ -3429,6 +3492,7 @@ s32 atl1_init_hw(struct atl1_hw *hw)
 s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
 {
 	struct pci_dev *pdev = hw->back->pdev;
+	struct atl1_adapter *adapter = hw->back;
 	s32 ret_val;
 	u16 phy_data;
 
@@ -3451,7 +3515,8 @@ s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
 		*speed = SPEED_10;
 		break;
 	default:
-		dev_dbg(&pdev->dev, "error getting speed\n");
+		if (netif_msg_hw(adapter))
+			dev_dbg(&pdev->dev, "error getting speed\n");
 		return ATLX_ERR_PHY_SPEED;
 		break;
 	}