Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
-rw-r--r--	drivers/net/qlge/qlge_main.c	| 153
1 file changed, 114 insertions(+), 39 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 90d1f76c0e8b..5768af17f168 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -214,6 +214,10 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 		return -ENOMEM;
 	}
 
+	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
+	if (status)
+		return status;
+
 	status = ql_wait_cfg(qdev, bit);
 	if (status) {
 		QPRINTK(qdev, IFUP, ERR,
@@ -221,12 +225,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 		goto exit;
 	}
 
-	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
-	if (status)
-		goto exit;
 	ql_write32(qdev, ICB_L, (u32) map);
 	ql_write32(qdev, ICB_H, (u32) (map >> 32));
-	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
 
 	mask = CFG_Q_MASK | (bit << 16);
 	value = bit | (q_id << CFG_Q_SHIFT);
@@ -237,6 +237,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
 	 */
 	status = ql_wait_cfg(qdev, bit);
 exit:
+	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
 	pci_unmap_single(qdev->pdev, map, size, direction);
 	return status;
 }
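
[Editor's note] Taken together, the three ql_write_cfg() hunks above take the SEM_ICB_MASK hardware semaphore before the first ql_wait_cfg() call and release it only at the shared exit label, so every exit path drops the semaphore exactly once. A condensed sketch of the resulting flow, stitched together purely for orientation from the hunks above (untouched lines between the hunks, including the CFG register kick and error reporting, are elided); it is not a drop-in copy of the function:

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);	/* take HW semaphore up front */
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);	/* waited on while holding the semaphore */
	if (status)
		goto exit;			/* failure path falls through to the unlock */

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));
	/* ... CFG kick elided ... */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;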
@@ -412,6 +413,57 @@ exit:
 	return status;
 }
 
+/* Set or clear MAC address in hardware. We sometimes
+ * have to clear it to prevent wrong frame routing
+ * especially in a bonding environment.
+ */
+static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
+{
+	int status;
+	char zero_mac_addr[ETH_ALEN];
+	char *addr;
+
+	if (set) {
+		addr = &qdev->ndev->dev_addr[0];
+		QPRINTK(qdev, IFUP, DEBUG,
+			"Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+			addr[0], addr[1], addr[2], addr[3],
+			addr[4], addr[5]);
+	} else {
+		memset(zero_mac_addr, 0, ETH_ALEN);
+		addr = &zero_mac_addr[0];
+		QPRINTK(qdev, IFUP, DEBUG,
+			"Clearing MAC address on %s\n",
+			qdev->ndev->name);
+	}
+	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		return status;
+	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
+			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
+	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	if (status)
+		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
+			"address.\n");
+	return status;
+}
+
+void ql_link_on(struct ql_adapter *qdev)
+{
+	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
+		 qdev->ndev->name);
+	netif_carrier_on(qdev->ndev);
+	ql_set_mac_addr(qdev, 1);
+}
+
+void ql_link_off(struct ql_adapter *qdev)
+{
+	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
+		 qdev->ndev->name);
+	netif_carrier_off(qdev->ndev);
+	ql_set_mac_addr(qdev, 0);
+}
+
 /* Get a specific frame routing value from the CAM.
  * Used for debug and reg dump.
  */
@@ -1628,7 +1680,7 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
-	qdev->stats.tx_bytes += tx_ring_desc->map_cnt;
+	qdev->stats.tx_bytes += (tx_ring_desc->skb)->len;
 	qdev->stats.tx_packets++;
 	dev_kfree_skb(tx_ring_desc->skb);
 	tx_ring_desc->skb = NULL;
@@ -1660,13 +1712,13 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
 /* Fire up a handler to reset the MPI processor. */
 void ql_queue_fw_error(struct ql_adapter *qdev)
 {
-	netif_carrier_off(qdev->ndev);
+	ql_link_off(qdev);
 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
 }
 
 void ql_queue_asic_error(struct ql_adapter *qdev)
 {
-	netif_carrier_off(qdev->ndev);
+	ql_link_off(qdev);
 	ql_disable_interrupts(qdev);
 	/* Clear adapter up bit to signal the recovery
 	 * process that it shouldn't kill the reset worker
@@ -2104,7 +2156,7 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
 	}
 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
 	mac_iocb_ptr = tx_ring_desc->queue_entry;
-	memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
+	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
 
 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
 	mac_iocb_ptr->tid = tx_ring_desc->index;
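
[Editor's note] This memset change, like the sizeof(*wqicb), sizeof(*ricb), sizeof(*tx_ring), sizeof(*rx_ring) and sizeof(*qdev) fixes in the hunks below, corrects the classic sizeof-on-a-pointer bug: sizeof(ptr) is the size of the pointer itself (4 or 8 bytes), so only a pointer-sized chunk was being cleared or passed as the transfer length instead of the whole structure. A minimal standalone illustration with a hypothetical structure (not the driver's real IOCB layout):

	#include <stdio.h>
	#include <string.h>

	struct iocb {				/* stand-in; not the real OB_MAC IOCB */
		unsigned char bytes[64];
	};

	int main(void)
	{
		struct iocb entry;
		struct iocb *p = &entry;

		memset(p, 0xff, sizeof(entry));	/* dirty the whole object first      */

		memset(p, 0, sizeof(p));	/* wrong: zeroes only 4 or 8 bytes   */
		memset(p, 0, sizeof(*p));	/* right: zeroes the 64-byte struct  */

		printf("sizeof(p) = %zu, sizeof(*p) = %zu\n", sizeof(p), sizeof(*p));
		return 0;
	}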
@@ -2743,7 +2795,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
 
 	ql_init_tx_ring(qdev, tx_ring);
 
-	err = ql_write_cfg(qdev, wqicb, sizeof(wqicb), CFG_LRQ,
+	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
 			   (u16) tx_ring->wq_id);
 	if (err) {
 		QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
@@ -3008,7 +3060,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
 	int i;
 	u8 *hash_id = (u8 *) ricb->hash_cq_id;
 
-	memset((void *)ricb, 0, sizeof(ricb));
+	memset((void *)ricb, 0, sizeof(*ricb));
 
 	ricb->base_cq = qdev->rss_ring_first_cq_id | RSS_L4K;
 	ricb->flags =
@@ -3030,7 +3082,7 @@ static int ql_start_rss(struct ql_adapter *qdev)
 
 	QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
 
-	status = ql_write_cfg(qdev, ricb, sizeof(ricb), CFG_LR, 0);
+	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
 	if (status) {
 		QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
 		return status;
@@ -3039,25 +3091,40 @@ static int ql_start_rss(struct ql_adapter *qdev)
 	return status;
 }
 
-/* Initialize the frame-to-queue routing. */
-static int ql_route_initialize(struct ql_adapter *qdev)
+static int ql_clear_routing_entries(struct ql_adapter *qdev)
 {
-	int status = 0;
-	int i;
+	int i, status = 0;
 
 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
 	if (status)
 		return status;
-
 	/* Clear all the entries in the routing table. */
 	for (i = 0; i < 16; i++) {
 		status = ql_set_routing_reg(qdev, i, 0, 0);
 		if (status) {
 			QPRINTK(qdev, IFUP, ERR,
-				"Failed to init routing register for CAM packets.\n");
-			goto exit;
+				"Failed to init routing register for CAM "
+				"packets.\n");
+			break;
 		}
 	}
+	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+	return status;
+}
+
+/* Initialize the frame-to-queue routing. */
+static int ql_route_initialize(struct ql_adapter *qdev)
+{
+	int status = 0;
+
+	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+	if (status)
+		return status;
+
+	/* Clear all the entries in the routing table. */
+	status = ql_clear_routing_entries(qdev);
+	if (status)
+		goto exit;
 
 	status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
 	if (status) {
@@ -3096,14 +3163,15 @@ exit:
 
 int ql_cam_route_initialize(struct ql_adapter *qdev)
 {
-	int status;
+	int status, set;
 
-	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
-	if (status)
-		return status;
-	status = ql_set_mac_addr_reg(qdev, (u8 *) qdev->ndev->perm_addr,
-			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
-	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+	/* If check if the link is up and use to
+	 * determine if we are setting or clearing
+	 * the MAC address in the CAM.
+	 */
+	set = ql_read32(qdev, STS);
+	set &= qdev->port_link_up;
+	status = ql_set_mac_addr(qdev, set);
 	if (status) {
 		QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
 		return status;
@@ -3210,9 +3278,17 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 {
 	u32 value;
 	int status = 0;
-	unsigned long end_jiffies = jiffies +
-		max((unsigned long)1, usecs_to_jiffies(30));
+	unsigned long end_jiffies;
 
+	/* Clear all the entries in the routing table. */
+	status = ql_clear_routing_entries(qdev);
+	if (status) {
+		QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
+		return status;
+	}
+
+	end_jiffies = jiffies +
+		max((unsigned long)1, usecs_to_jiffies(30));
 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
 
 	do {
@@ -3252,7 +3328,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
 	int i, status = 0;
 	struct rx_ring *rx_ring;
 
-	netif_carrier_off(qdev->ndev);
+	ql_link_off(qdev);
 
 	/* Don't kill the reset worker thread if we
 	 * are in the process of recovery.
@@ -3319,8 +3395,12 @@ static int ql_adapter_up(struct ql_adapter *qdev)
 	}
 	set_bit(QL_ADAPTER_UP, &qdev->flags);
 	ql_alloc_rx_buffers(qdev);
-	if ((ql_read32(qdev, STS) & qdev->port_init))
-		netif_carrier_on(qdev->ndev);
+	/* If the port is initialized and the
+	 * link is up the turn on the carrier.
+	 */
+	if ((ql_read32(qdev, STS) & qdev->port_init) &&
+			(ql_read32(qdev, STS) & qdev->port_link_up))
+		ql_link_on(qdev);
 	ql_enable_interrupts(qdev);
 	ql_enable_all_completion_interrupts(qdev);
 	netif_tx_start_all_queues(qdev->ndev);
@@ -3346,11 +3426,6 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
 		return -ENOMEM;
 	}
 	status = ql_request_irq(qdev);
-	if (status)
-		goto err_irq;
-	return status;
-err_irq:
-	ql_free_mem_resources(qdev);
 	return status;
 }
 
@@ -3414,7 +3489,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 
 	for (i = 0; i < qdev->tx_ring_count; i++) {
 		tx_ring = &qdev->tx_ring[i];
-		memset((void *)tx_ring, 0, sizeof(tx_ring));
+		memset((void *)tx_ring, 0, sizeof(*tx_ring));
 		tx_ring->qdev = qdev;
 		tx_ring->wq_id = i;
 		tx_ring->wq_len = qdev->tx_ring_size;
@@ -3430,7 +3505,7 @@ static int ql_configure_rings(struct ql_adapter *qdev)
 
 	for (i = 0; i < qdev->rx_ring_count; i++) {
 		rx_ring = &qdev->rx_ring[i];
-		memset((void *)rx_ring, 0, sizeof(rx_ring));
+		memset((void *)rx_ring, 0, sizeof(*rx_ring));
 		rx_ring->qdev = qdev;
 		rx_ring->cq_id = i;
 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
@@ -3789,7 +3864,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 	int pos, err = 0;
 	u16 val16;
 
-	memset((void *)qdev, 0, sizeof(qdev));
+	memset((void *)qdev, 0, sizeof(*qdev));
 	err = pci_enable_device(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "PCI device enable failed.\n");
@@ -3976,7 +4051,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
 		pci_disable_device(pdev);
 		return err;
 	}
-	netif_carrier_off(ndev);
+	ql_link_off(qdev);
 	ql_display_dev_info(ndev);
 	cards_found++;
 	return 0;