author     Joe Perches <joe@perches.com>            2010-02-09 06:49:50 -0500
committer  David S. Miller <davem@davemloft.net>    2010-02-12 16:27:45 -0500
commit     bf82c189a92f06967cf790d3ab0fc9827a9d00ff
tree       d481962037158879f2d9af33cd595a5b406a8128
parent     b3d95c5c93d4b57eaea0ad3f582b08a6b5fb3eb1
drivers/net/r8169.c: Use netif_printk macros
Simplify the logic a bit and make the message logs a bit more consistent.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/net/r8169.c   136
1 file changed, 48 insertions(+), 88 deletions(-)
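Every hunk below makes the same mechanical substitution. As a rough before/after sketch (editor's illustration drawn from the first hunk, not part of the commit):

	/* before: open-coded msg_enable test plus printk with a hand-rolled prefix */
	if (netif_msg_ifup(tp))
		printk(KERN_INFO PFX "%s: link up\n", dev->name);

	/* after: netif_info() folds the netif_msg_ifup() test, the KERN_INFO
	 * level and the device-name prefix into a single call */
	netif_info(tp, ifup, dev, "link up\n");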
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 85c635f354b5..616ae5aa66aa 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -744,12 +744,10 @@ static void rtl8169_check_link_status(struct net_device *dev,
 	spin_lock_irqsave(&tp->lock, flags);
 	if (tp->link_ok(ioaddr)) {
 		netif_carrier_on(dev);
-		if (netif_msg_ifup(tp))
-			printk(KERN_INFO PFX "%s: link up\n", dev->name);
+		netif_info(tp, ifup, dev, "link up\n");
 	} else {
-		if (netif_msg_ifdown(tp))
-			printk(KERN_INFO PFX "%s: link down\n", dev->name);
 		netif_carrier_off(dev);
+		netif_info(tp, ifdown, dev, "link down\n");
 	}
 	spin_unlock_irqrestore(&tp->lock, flags);
 }
@@ -862,11 +860,8 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
 	} else if (autoneg == AUTONEG_ENABLE)
 		RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
 	else {
-		if (netif_msg_link(tp)) {
-			printk(KERN_WARNING "%s: "
-			       "incorrect speed setting refused in TBI mode\n",
-			       dev->name);
-		}
+		netif_warn(tp, link, dev,
+			   "incorrect speed setting refused in TBI mode\n");
 		ret = -EOPNOTSUPP;
 	}
 
@@ -901,9 +896,9 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
 		    (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
 		    (tp->mac_version != RTL_GIGA_MAC_VER_16)) {
 			giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
-		} else if (netif_msg_link(tp)) {
-			printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
-			       dev->name);
+		} else {
+			netif_info(tp, link, dev,
+				   "PHY does not support 1000Mbps\n");
 		}
 
 		bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
@@ -2705,8 +2700,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
 	if (tp->link_ok(ioaddr))
 		goto out_unlock;
 
-	if (netif_msg_link(tp))
-		printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
+	netif_warn(tp, link, dev, "PHY reset until link up\n");
 
 	tp->phy_reset_enable(ioaddr);
 
@@ -2776,8 +2770,7 @@ static void rtl8169_phy_reset(struct net_device *dev,
 			return;
 		msleep(1);
 	}
-	if (netif_msg_link(tp))
-		printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+	netif_err(tp, link, dev, "PHY reset failed\n");
 }
 
 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -2811,8 +2804,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
 	 */
 	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
 
-	if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
-		printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
+	if (RTL_R8(PHYstatus) & TBI_Enable)
+		netif_info(tp, link, dev, "TBI auto-negotiating\n");
 }
 
 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -3012,8 +3005,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
 	rc = pci_enable_device(pdev);
 	if (rc < 0) {
-		if (netif_msg_probe(tp))
-			dev_err(&pdev->dev, "enable failure\n");
+		netif_err(tp, probe, dev, "enable failure\n");
 		goto err_out_free_dev_1;
 	}
 
@@ -3023,29 +3015,24 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* make sure PCI base addr 1 is MMIO */
 	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
-		if (netif_msg_probe(tp)) {
-			dev_err(&pdev->dev,
-				"region #%d not an MMIO resource, aborting\n",
-				region);
-		}
+		netif_err(tp, probe, dev,
+			  "region #%d not an MMIO resource, aborting\n",
+			  region);
 		rc = -ENODEV;
 		goto err_out_mwi_3;
 	}
 
 	/* check for weird/broken PCI region reporting */
 	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
-		if (netif_msg_probe(tp)) {
-			dev_err(&pdev->dev,
-				"Invalid PCI region size(s), aborting\n");
-		}
+		netif_err(tp, probe, dev,
+			  "Invalid PCI region size(s), aborting\n");
 		rc = -ENODEV;
 		goto err_out_mwi_3;
 	}
 
 	rc = pci_request_regions(pdev, MODULENAME);
 	if (rc < 0) {
-		if (netif_msg_probe(tp))
-			dev_err(&pdev->dev, "could not request regions.\n");
+		netif_err(tp, probe, dev, "could not request regions\n");
 		goto err_out_mwi_3;
 	}
 
@@ -3058,10 +3045,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	} else {
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (rc < 0) {
-			if (netif_msg_probe(tp)) {
-				dev_err(&pdev->dev,
-					"DMA configuration failed.\n");
-			}
+			netif_err(tp, probe, dev, "DMA configuration failed\n");
 			goto err_out_free_res_4;
 		}
 	}
@@ -3069,15 +3053,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* ioremap MMIO region */
 	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
 	if (!ioaddr) {
-		if (netif_msg_probe(tp))
-			dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
+		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
 		rc = -EIO;
 		goto err_out_free_res_4;
 	}
 
 	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (!tp->pcie_cap && netif_msg_probe(tp))
-		dev_info(&pdev->dev, "no PCI Express capability\n");
+	if (!tp->pcie_cap)
+		netif_info(tp, probe, dev, "no PCI Express capability\n");
 
 	RTL_W16(IntrMask, 0x0000);
 
@@ -3100,10 +3083,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* Use appropriate default if unknown */
 	if (tp->mac_version == RTL_GIGA_MAC_NONE) {
-		if (netif_msg_probe(tp)) {
-			dev_notice(&pdev->dev,
-				   "unknown MAC, using family default\n");
-		}
+		netif_notice(tp, probe, dev,
+			     "unknown MAC, using family default\n");
 		tp->mac_version = cfg->default_ver;
 	}
 
@@ -3185,14 +3166,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, dev);
 
-	if (netif_msg_probe(tp)) {
-		u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff;
-
-		printk(KERN_INFO "%s: %s at 0x%lx, %pM, XID %08x IRQ %d\n",
-		       dev->name,
-		       rtl_chip_info[tp->chipset].name,
-		       dev->base_addr, dev->dev_addr, xid, dev->irq);
-	}
+	netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
+		   rtl_chip_info[tp->chipset].name,
+		   dev->base_addr, dev->dev_addr,
+		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
 
 	rtl8169_init_phy(dev, tp);
 
@@ -4131,10 +4108,10 @@ static void rtl8169_reinit_task(struct work_struct *work)
 
 	ret = rtl8169_open(dev);
 	if (unlikely(ret < 0)) {
-		if (net_ratelimit() && netif_msg_drv(tp)) {
-			printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
-			       " Rescheduling.\n", dev->name, ret);
-		}
+		if (net_ratelimit())
+			netif_err(tp, drv, dev,
+				  "reinit failure (status = %d). Rescheduling\n",
+				  ret);
 		rtl8169_schedule_work(dev, rtl8169_reinit_task);
 	}
 
@@ -4164,10 +4141,8 @@ static void rtl8169_reset_task(struct work_struct *work)
 		netif_wake_queue(dev);
 		rtl8169_check_link_status(dev, tp, tp->mmio_addr);
 	} else {
-		if (net_ratelimit() && netif_msg_intr(tp)) {
-			printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
-			       dev->name);
-		}
+		if (net_ratelimit())
+			netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
 		rtl8169_schedule_work(dev, rtl8169_reset_task);
 	}
 
@@ -4255,11 +4230,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	u32 opts1;
 
 	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
-		if (netif_msg_drv(tp)) {
-			printk(KERN_ERR
-			       "%s: BUG! Tx Ring full when queue awake!\n",
-			       dev->name);
-		}
+		netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
 		goto err_stop;
 	}
 
@@ -4321,11 +4292,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 	pci_read_config_word(pdev, PCI_STATUS, &pci_status);
 
-	if (netif_msg_intr(tp)) {
-		printk(KERN_ERR
-		       "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
-		       dev->name, pci_cmd, pci_status);
-	}
+	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
+		  pci_cmd, pci_status);
 
 	/*
 	 * The recovery sequence below admits a very elaborated explanation:
@@ -4349,8 +4317,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 
 	/* The infamous DAC f*ckup only happens at boot time */
 	if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
-		if (netif_msg_intr(tp))
-			printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
+		netif_info(tp, intr, dev, "disabling PCI DAC\n");
 		tp->cp_cmd &= ~PCIDAC;
 		RTL_W16(CPlusCmd, tp->cp_cmd);
 		dev->features &= ~NETIF_F_HIGHDMA;
@@ -4477,11 +4444,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
 		if (status & DescOwn)
 			break;
 		if (unlikely(status & RxRES)) {
-			if (netif_msg_rx_err(tp)) {
-				printk(KERN_INFO
-				       "%s: Rx ERROR. status = %08x\n",
-				       dev->name, status);
-			}
+			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
+				   status);
 			dev->stats.rx_errors++;
 			if (status & (RxRWT | RxRUNT))
 				dev->stats.rx_length_errors++;
@@ -4544,8 +4508,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
 	tp->cur_rx = cur_rx;
 
 	delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
-	if (!delta && count && netif_msg_intr(tp))
-		printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+	if (!delta && count)
+		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
 	tp->dirty_rx += delta;
 
 	/*
@@ -4555,8 +4519,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
 	 *   after refill ?
 	 * - how do others driver handle this condition (Uh oh...).
 	 */
-	if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
-		printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
+	if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
+		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
 
 	return count;
 }
@@ -4611,10 +4575,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 
 		if (likely(napi_schedule_prep(&tp->napi)))
 			__napi_schedule(&tp->napi);
-		else if (netif_msg_intr(tp)) {
-			printk(KERN_INFO "%s: interrupt %04x in poll\n",
-			       dev->name, status);
-		}
+		else
+			netif_info(tp, intr, dev,
+				   "interrupt %04x in poll\n", status);
 	}
 
 	/* We only get a new MSI interrupt when all active irq
@@ -4750,10 +4713,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
 
 	if (dev->flags & IFF_PROMISC) {
 		/* Unconditionally log net taps. */
-		if (netif_msg_link(tp)) {
-			printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-			       dev->name);
-		}
+		netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
 		rx_mode =
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;