Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
-rw-r--r--	drivers/net/qlge/qlge_main.c	193
1 file changed, 132 insertions(+), 61 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 3d0efea3211..cea7531f4f4 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -34,7 +34,6 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
@@ -321,6 +320,37 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 
 	switch (type) {
 	case MAC_ADDR_TYPE_MULTI_MAC:
+		{
+			u32 upper = (addr[0] << 8) | addr[1];
+			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
+					(addr[4] << 8) | (addr[5]);
+
+			status =
+				ql_wait_reg_rdy(qdev,
+					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+				(index << MAC_ADDR_IDX_SHIFT) |
+				type | MAC_ADDR_E);
+			ql_write32(qdev, MAC_ADDR_DATA, lower);
+			status =
+				ql_wait_reg_rdy(qdev,
+					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
+				(index << MAC_ADDR_IDX_SHIFT) |
+				type | MAC_ADDR_E);
+
+			ql_write32(qdev, MAC_ADDR_DATA, upper);
+			status =
+				ql_wait_reg_rdy(qdev,
+					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
+			if (status)
+				goto exit;
+			break;
+		}
 	case MAC_ADDR_TYPE_CAM_MAC:
 		{
 			u32 cam_output;
@@ -366,16 +396,14 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
 			   and possibly the function id. Right now we hardcode
 			   the route field to NIC core.
 			 */
-			if (type == MAC_ADDR_TYPE_CAM_MAC) {
-				cam_output = (CAM_OUT_ROUTE_NIC |
-					      (qdev->
-					       func << CAM_OUT_FUNC_SHIFT) |
-					      (0 << CAM_OUT_CQ_ID_SHIFT));
-				if (qdev->vlgrp)
-					cam_output |= CAM_OUT_RV;
-				/* route to NIC core */
-				ql_write32(qdev, MAC_ADDR_DATA, cam_output);
-			}
+			cam_output = (CAM_OUT_ROUTE_NIC |
+				      (qdev->
+				      func << CAM_OUT_FUNC_SHIFT) |
+				      (0 << CAM_OUT_CQ_ID_SHIFT));
+			if (qdev->vlgrp)
+				cam_output |= CAM_OUT_RV;
+			/* route to NIC core */
+			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
 			break;
 		}
 	case MAC_ADDR_TYPE_VLAN:
@@ -547,14 +575,14 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
 		}
 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
 		{
-			value = RT_IDX_DST_CAM_Q |	/* dest */
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
 			    RT_IDX_TYPE_NICQ |	/* type */
 			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
 			break;
 		}
 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
 		{
-			value = RT_IDX_DST_CAM_Q |	/* dest */
+			value = RT_IDX_DST_DFLT_Q |	/* dest */
 			    RT_IDX_TYPE_NICQ |	/* type */
 			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
 			break;
@@ -1926,12 +1954,10 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 	if (status)
 		return;
-	spin_lock(&qdev->hw_lock);
 	if (ql_set_mac_addr_reg
 	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
 		QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
 	}
-	spin_unlock(&qdev->hw_lock);
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 }
 
@@ -1945,12 +1971,10 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
 	if (status)
 		return;
 
-	spin_lock(&qdev->hw_lock);
 	if (ql_set_mac_addr_reg
 	    (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
 		QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
 	}
-	spin_unlock(&qdev->hw_lock);
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 
 }
@@ -2001,15 +2025,17 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
 	/*
 	 * Check MPI processor activity.
 	 */
-	if (var & STS_PI) {
+	if ((var & STS_PI) &&
+		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
 		/*
 		 * We've got an async event or mailbox completion.
 		 * Handle it and clear the source of the interrupt.
 		 */
 		QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
 		ql_disable_completion_interrupt(qdev, intr_context->intr);
-		queue_delayed_work_on(smp_processor_id(), qdev->workqueue,
-				      &qdev->mpi_work, 0);
+		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
+		queue_delayed_work_on(smp_processor_id(),
+				qdev->workqueue, &qdev->mpi_work, 0);
 		work_done++;
 	}
 
@@ -3080,6 +3106,12 @@ err_irq:
 
 static int ql_start_rss(struct ql_adapter *qdev)
 {
+	u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+				0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
+				0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
+				0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
+				0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
+				0xbe, 0xac, 0x01, 0xfa};
 	struct ricb *ricb = &qdev->ricb;
 	int status = 0;
 	int i;
@@ -3089,21 +3121,17 @@ static int ql_start_rss(struct ql_adapter *qdev)
 
 	ricb->base_cq = RSS_L4K;
 	ricb->flags =
-	    (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
-	     RSS_RT6);
-	ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
+		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
+	ricb->mask = cpu_to_le16((u16)(0x3ff));
 
 	/*
 	 * Fill out the Indirection Table.
 	 */
-	for (i = 0; i < 256; i++)
-		hash_id[i] = i & (qdev->rss_ring_count - 1);
+	for (i = 0; i < 1024; i++)
+		hash_id[i] = (i & (qdev->rss_ring_count - 1));
 
-	/*
-	 * Random values for the IPv6 and IPv4 Hash Keys.
-	 */
-	get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
-	get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
+	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
+	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
 
 	QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
 
@@ -3242,6 +3270,13 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
 	ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
 		min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));
 
+	/* Set RX packet routing to use port/pci function on which the
+	 * packet arrived on in addition to usual frame routing.
+	 * This is helpful on bonding where both interfaces can have
+	 * the same MAC address.
+	 */
+	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
+
 	/* Start up the rx queues. */
 	for (i = 0; i < qdev->rx_ring_count; i++) {
 		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
@@ -3314,6 +3349,13 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 
 	end_jiffies = jiffies +
 		max((unsigned long)1, usecs_to_jiffies(30));
+
+	/* Stop management traffic. */
+	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+
+	/* Wait for the NIC and MGMNT FIFOs to empty. */
+	ql_wait_fifo_empty(qdev);
+
 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
 
 	do {
@@ -3329,6 +3371,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 		status = -ETIMEDOUT;
 	}
 
+	/* Resume management traffic. */
+	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
 	return status;
 }
 
@@ -3585,7 +3629,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
 	if (status)
 		return;
-	spin_lock(&qdev->hw_lock);
 	/*
 	 * Set or clear promiscuous mode if a
 	 * transition is taking place.
@@ -3662,7 +3705,6 @@ static void qlge_set_multicast_list(struct net_device *ndev)
 		}
 	}
 exit:
-	spin_unlock(&qdev->hw_lock);
 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 }
 
@@ -3682,10 +3724,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 	if (status)
 		return status;
-	spin_lock(&qdev->hw_lock);
 	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
-	spin_unlock(&qdev->hw_lock);
 	if (status)
 		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -3711,6 +3751,12 @@ static void ql_asic_reset_work(struct work_struct *work)
 	status = ql_adapter_up(qdev);
 	if (status)
 		goto error;
+
+	/* Restore rx mode. */
+	clear_bit(QL_ALLMULTI, &qdev->flags);
+	clear_bit(QL_PROMISCUOUS, &qdev->flags);
+	qlge_set_multicast_list(qdev->ndev);
+
 	rtnl_unlock();
 	return;
 error:
@@ -3870,6 +3916,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 		goto err_out;
 	}
 
+	pci_save_state(pdev);
 	qdev->reg_base =
 	    ioremap_nocache(pci_resource_start(pdev, 1),
 			    pci_resource_len(pdev, 1));
@@ -3928,7 +3975,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
-	mutex_init(&qdev->mpi_mutex);
 	init_completion(&qdev->ide_completion);
 
 	if (!cards_found) {
@@ -4025,6 +4071,33 @@ static void __devexit qlge_remove(struct pci_dev *pdev)
 	free_netdev(ndev);
 }
 
+/* Clean up resources without touching hardware. */
+static void ql_eeh_close(struct net_device *ndev)
+{
+	int i;
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (netif_carrier_ok(ndev)) {
+		netif_carrier_off(ndev);
+		netif_stop_queue(ndev);
+	}
+
+	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
+		cancel_delayed_work_sync(&qdev->asic_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_reset_work);
+	cancel_delayed_work_sync(&qdev->mpi_work);
+	cancel_delayed_work_sync(&qdev->mpi_idc_work);
+	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
+
+	for (i = 0; i < qdev->rss_ring_count; i++)
+		netif_napi_del(&qdev->rx_ring[i].napi);
+
+	clear_bit(QL_ADAPTER_UP, &qdev->flags);
+	ql_tx_ring_clean(qdev);
+	ql_free_rx_buffers(qdev);
+	ql_release_adapter_resources(qdev);
+}
+
 /*
  * This callback is called by the PCI subsystem whenever
  * a PCI bus error is detected.
@@ -4033,17 +4106,21 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
 					       enum pci_channel_state state)
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
-	struct ql_adapter *qdev = netdev_priv(ndev);
 
-	netif_device_detach(ndev);
-
-	if (state == pci_channel_io_perm_failure)
+	switch (state) {
+	case pci_channel_io_normal:
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		netif_device_detach(ndev);
+		if (netif_running(ndev))
+			ql_eeh_close(ndev);
+		pci_disable_device(pdev);
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		dev_err(&pdev->dev,
+			"%s: pci_channel_io_perm_failure.\n", __func__);
 		return PCI_ERS_RESULT_DISCONNECT;
-
-	if (netif_running(ndev))
-		ql_adapter_down(qdev);
-
-	pci_disable_device(pdev);
+	}
 
 	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -4060,25 +4137,15 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
 	struct net_device *ndev = pci_get_drvdata(pdev);
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
+	pdev->error_state = pci_channel_io_normal;
+
+	pci_restore_state(pdev);
 	if (pci_enable_device(pdev)) {
 		QPRINTK(qdev, IFUP, ERR,
 			"Cannot re-enable PCI device after reset.\n");
 		return PCI_ERS_RESULT_DISCONNECT;
 	}
-
 	pci_set_master(pdev);
-
-	netif_carrier_off(ndev);
-	ql_adapter_reset(qdev);
-
-	/* Make sure the EEPROM is good */
-	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
-
-	if (!is_valid_ether_addr(ndev->perm_addr)) {
-		QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n");
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
 	return PCI_ERS_RESULT_RECOVERED;
 }
 
@@ -4086,17 +4153,21 @@ static void qlge_io_resume(struct pci_dev *pdev)
 {
 	struct net_device *ndev = pci_get_drvdata(pdev);
 	struct ql_adapter *qdev = netdev_priv(ndev);
+	int err = 0;
 
-	pci_set_master(pdev);
-
+	if (ql_adapter_reset(qdev))
+		QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
 	if (netif_running(ndev)) {
-		if (ql_adapter_up(qdev)) {
+		err = qlge_open(ndev);
+		if (err) {
 			QPRINTK(qdev, IFUP, ERR,
 				"Device initialization failed after reset.\n");
 			return;
 		}
+	} else {
+		QPRINTK(qdev, IFUP, ERR,
+			"Device was not running prior to EEH.\n");
 	}
-
 	netif_device_attach(ndev);
 }
 