Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--	drivers/net/skge.c	235
1 file changed, 159 insertions(+), 76 deletions(-)

diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 45283f3f95e4..e482e7fcbb2b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.9"
+#define DRV_VERSION		"1.10"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -132,18 +132,93 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 }
 
 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
-static int wol_supported(const struct skge_hw *hw)
+static u32 wol_supported(const struct skge_hw *hw)
 {
-	return !((hw->chip_id == CHIP_ID_GENESIS ||
-		  (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
+	if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev != 0)
+		return WAKE_MAGIC | WAKE_PHY;
+	else
+		return 0;
+}
+
+static u32 pci_wake_enabled(struct pci_dev *dev)
+{
+	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+	u16 value;
+
+	/* If device doesn't support PM Capabilities, but request is to disable
+	 * wake events, it's a nop; otherwise fail */
+	if (!pm)
+		return 0;
+
+	pci_read_config_word(dev, pm + PCI_PM_PMC, &value);
+
+	value &= PCI_PM_CAP_PME_MASK;
+	value >>= ffs(PCI_PM_CAP_PME_MASK) - 1;	/* First bit of mask */
+
+	return value != 0;
+}
+
+static void skge_wol_init(struct skge_port *skge)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
+	enum pause_control save_mode;
+	u32 ctrl;
+
+	/* Bring hardware out of reset */
+	skge_write16(hw, B0_CTST, CS_RST_CLR);
+	skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
+
+	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	/* Force to 10/100 skge_reset will re-enable on resume */
+	save_mode = skge->flow_control;
+	skge->flow_control = FLOW_MODE_SYMMETRIC;
+
+	ctrl = skge->advertising;
+	skge->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
+
+	skge_phy_reset(skge);
+
+	skge->flow_control = save_mode;
+	skge->advertising = ctrl;
+
+	/* Set GMAC to no flow control and auto update for speed/duplex */
+	gma_write16(hw, port, GM_GP_CTRL,
+		    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
+		    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
+
+	/* Set WOL address */
+	memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
+		    skge->netdev->dev_addr, ETH_ALEN);
+
+	/* Turn on appropriate WOL control bits */
+	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
+	ctrl = 0;
+	if (skge->wol & WAKE_PHY)
+		ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
+
+	if (skge->wol & WAKE_MAGIC)
+		ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
+	else
+		ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
+
+	ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
+	skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
+
+	/* block receiver */
+	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
 }
 
 static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	struct skge_port *skge = netdev_priv(dev);
 
-	wol->supported = wol_supported(skge->hw) ? WAKE_MAGIC : 0;
-	wol->wolopts = skge->wol ? WAKE_MAGIC : 0;
+	wol->supported = wol_supported(skge->hw);
+	wol->wolopts = skge->wol;
 }
 
 static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -151,23 +226,12 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_hw *hw = skge->hw;
 
-	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
+	if (wol->wolopts & ~wol_supported(hw))
 		return -EOPNOTSUPP;
 
-	if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
-		return -EOPNOTSUPP;
-
-	skge->wol = wol->wolopts == WAKE_MAGIC;
-
-	if (skge->wol) {
-		memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
-
-		skge_write16(hw, WOL_CTRL_STAT,
-			     WOL_CTL_ENA_PME_ON_MAGIC_PKT |
-			     WOL_CTL_ENA_MAGIC_PKT_UNIT);
-	} else
-		skge_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
-
+	skge->wol = wol->wolopts;
+	if (!netif_running(dev))
+		skge_wol_init(skge);
 	return 0;
 }
 
@@ -2373,6 +2437,9 @@ static int skge_up(struct net_device *dev)
 	size_t rx_size, tx_size;
 	int err;
 
+	if (!is_valid_ether_addr(dev->dev_addr))
+		return -EINVAL;
+
 	if (netif_msg_ifup(skge))
 		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
 
@@ -2392,7 +2459,7 @@ static int skge_up(struct net_device *dev)
 	BUG_ON(skge->dma & 7);
 
 	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
-		printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
+		dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
 		err = -EINVAL;
 		goto free_pci_mem;
 	}
@@ -3001,6 +3068,7 @@ static void skge_mac_intr(struct skge_hw *hw, int port)
 /* Handle device specific framing and timeout interrupts */
 static void skge_error_irq(struct skge_hw *hw)
 {
+	struct pci_dev *pdev = hw->pdev;
 	u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
 
 	if (hw->chip_id == CHIP_ID_GENESIS) {
@@ -3016,12 +3084,12 @@ static void skge_error_irq(struct skge_hw *hw)
 	}
 
 	if (hwstatus & IS_RAM_RD_PAR) {
-		printk(KERN_ERR PFX "Ram read data parity error\n");
+		dev_err(&pdev->dev, "Ram read data parity error\n");
 		skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
 	}
 
 	if (hwstatus & IS_RAM_WR_PAR) {
-		printk(KERN_ERR PFX "Ram write data parity error\n");
+		dev_err(&pdev->dev, "Ram write data parity error\n");
 		skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
 	}
 
@@ -3032,38 +3100,38 @@ static void skge_error_irq(struct skge_hw *hw)
 		skge_mac_parity(hw, 1);
 
 	if (hwstatus & IS_R1_PAR_ERR) {
-		printk(KERN_ERR PFX "%s: receive queue parity error\n",
+		dev_err(&pdev->dev, "%s: receive queue parity error\n",
 		       hw->dev[0]->name);
 		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
 	}
 
 	if (hwstatus & IS_R2_PAR_ERR) {
-		printk(KERN_ERR PFX "%s: receive queue parity error\n",
+		dev_err(&pdev->dev, "%s: receive queue parity error\n",
 		       hw->dev[1]->name);
 		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
 	}
 
 	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
 		u16 pci_status, pci_cmd;
 
-		pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
-		pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+		pci_read_config_word(pdev, PCI_STATUS, &pci_status);
 
-		printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
-		       pci_name(hw->pdev), pci_cmd, pci_status);
+		dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
+			pci_cmd, pci_status);
 
 		/* Write the error bits back to clear them. */
 		pci_status &= PCI_STATUS_ERROR_BITS;
 		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-		pci_write_config_word(hw->pdev, PCI_COMMAND,
+		pci_write_config_word(pdev, PCI_COMMAND,
 				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
-		pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
+		pci_write_config_word(pdev, PCI_STATUS, pci_status);
 		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
 		/* if error still set then just ignore it */
 		hwstatus = skge_read32(hw, B0_HWE_ISRC);
 		if (hwstatus & IS_IRQ_STAT) {
-			printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
+			dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring them)\n");
 			hw->intr_mask &= ~IS_HW_ERR;
 		}
 	}
@@ -3277,8 +3345,8 @@ static int skge_reset(struct skge_hw *hw)
 			hw->phy_addr = PHY_ADDR_BCOM;
 			break;
 		default:
-			printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
-			       pci_name(hw->pdev), hw->phy_type);
+			dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n",
+				hw->phy_type);
 			return -EOPNOTSUPP;
 		}
 		break;
@@ -3293,8 +3361,8 @@ static int skge_reset(struct skge_hw *hw)
 		break;
 
 	default:
-		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
-		       pci_name(hw->pdev), hw->chip_id);
+		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
+			hw->chip_id);
 		return -EOPNOTSUPP;
 	}
 
@@ -3334,7 +3402,7 @@ static int skge_reset(struct skge_hw *hw)
 	/* avoid boards with stuck Hardware error bits */
 	if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
 	    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
-		printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
+		dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n");
 		hw->intr_mask &= ~IS_HW_ERR;
 	}
 
@@ -3408,7 +3476,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	struct net_device *dev = alloc_etherdev(sizeof(*skge));
 
 	if (!dev) {
-		printk(KERN_ERR "skge etherdev alloc failed");
+		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
 		return NULL;
 	}
 
@@ -3452,6 +3520,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	skge->duplex = -1;
 	skge->speed = -1;
 	skge->advertising = skge_supported_modes(hw);
+	skge->wol = pci_wake_enabled(hw->pdev) ? wol_supported(hw) : 0;
 
 	hw->dev[port] = dev;
 
@@ -3496,15 +3565,13 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
 		goto err_out;
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
 		goto err_out_disable_pdev;
 	}
 
@@ -3519,8 +3586,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	}
 
 	if (err) {
-		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "no usable DMA configuration\n");
 		goto err_out_free_regions;
 	}
 
@@ -3538,8 +3604,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	err = -ENOMEM;
 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
 	if (!hw) {
-		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot allocate hardware struct\n");
 		goto err_out_free_regions;
 	}
 
@@ -3550,8 +3615,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
-		printk(KERN_ERR PFX "%s: cannot map device registers\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot map device registers\n");
 		goto err_out_free_hw;
 	}
 
@@ -3567,23 +3631,19 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	if (!dev)
 		goto err_out_led_off;
 
-	if (!is_valid_ether_addr(dev->dev_addr)) {
-		printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
-		       pci_name(pdev));
-		err = -EIO;
-		goto err_out_free_netdev;
-	}
+	/* Some motherboards are broken and has zero in ROM. */
+	if (!is_valid_ether_addr(dev->dev_addr))
+		dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n");
 
 	err = register_netdev(dev);
 	if (err) {
-		printk(KERN_ERR PFX "%s: cannot register net device\n",
-		       pci_name(pdev));
+		dev_err(&pdev->dev, "cannot register net device\n");
 		goto err_out_free_netdev;
 	}
 
 	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
 	if (err) {
-		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+		dev_err(&pdev->dev, "%s: cannot assign irq %d\n",
 		       dev->name, pdev->irq);
 		goto err_out_unregister;
 	}
@@ -3594,7 +3654,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 			skge_show_addr(dev1);
 		else {
 			/* Failure to register second port need not be fatal */
-			printk(KERN_WARNING PFX "register of second port failed\n");
+			dev_warn(&pdev->dev, "register of second port failed\n");
 			hw->dev[1] = NULL;
 			free_netdev(dev1);
 		}
@@ -3659,28 +3719,46 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_PM
+static int vaux_avail(struct pci_dev *pdev)
+{
+	int pm_cap;
+
+	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pm_cap) {
+		u16 ctl;
+		pci_read_config_word(pdev, pm_cap + PCI_PM_PMC, &ctl);
+		if (ctl & PCI_PM_CAP_AUX_POWER)
+			return 1;
+	}
+	return 0;
+}
+
+
 static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct skge_hw *hw = pci_get_drvdata(pdev);
-	int i, wol = 0;
+	int i, err, wol = 0;
+
+	err = pci_save_state(pdev);
+	if (err)
+		return err;
 
-	pci_save_state(pdev);
 	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
+		struct skge_port *skge = netdev_priv(dev);
 
-		if (netif_running(dev)) {
-			struct skge_port *skge = netdev_priv(dev);
+		if (netif_running(dev))
+			skge_down(dev);
+		if (skge->wol)
+			skge_wol_init(skge);
 
-			netif_carrier_off(dev);
-			if (skge->wol)
-				netif_stop_queue(dev);
-			else
-				skge_down(dev);
-			wol |= skge->wol;
-		}
-		netif_device_detach(dev);
+		wol |= skge->wol;
 	}
 
+	if (wol && vaux_avail(pdev))
+		skge_write8(hw, B0_POWER_CTRL,
+			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
+
 	skge_write32(hw, B0_IMSK, 0);
 	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3693,8 +3771,14 @@ static int skge_resume(struct pci_dev *pdev)
 	struct skge_hw *hw = pci_get_drvdata(pdev);
 	int i, err;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
+	err = pci_set_power_state(pdev, PCI_D0);
+	if (err)
+		goto out;
+
+	err = pci_restore_state(pdev);
+	if (err)
+		goto out;
+
 	pci_enable_wake(pdev, PCI_D0, 0);
 
 	err = skge_reset(hw);
@@ -3704,7 +3788,6 @@ static int skge_resume(struct pci_dev *pdev)
 	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
 
-		netif_device_attach(dev);
 		if (netif_running(dev)) {
 			err = skge_up(dev);
 