author     Stephen Hemminger <shemminger@vyatta.com>   2008-11-21 20:29:50 -0500
committer  David S. Miller <davem@davemloft.net>       2008-11-21 20:29:50 -0500
commit     8668ae9241de085f046aa14fa3a97654a31a06e3 (patch)
tree       9b1feee50c5217e4b978bc08776930785b4299d4 /drivers/net/qlge
parent     25ed784910e43e61b5642dd8d2d8c13384a0d290 (diff)
qlge: fix sparse warnings
Fix sparse warnings and one bug:
* Several routines can be static
* Don't lose __iomem annotation
* Fix locking on error path (bug)
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
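The first item accounts for most of the diff: helpers that are referenced only from qlge_main.c gain the `static` keyword, which gives them internal linkage and silences sparse's missing-declaration warning. A minimal sketch of the pattern, with a hypothetical helper name rather than qlge code:

```c
/*
 * Hypothetical example, not from qlge: a helper used only within this
 * file. Without "static", sparse reports a warning along the lines of
 * "symbol 'clamp_len' was not declared. Should it be static?" because
 * the symbol has external linkage but no prototype in any header.
 */
static int clamp_len(int len, int max)
{
	return len > max ? max : len;
}
```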
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--  drivers/net/qlge/qlge_main.c | 27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 713b793f18aa..225930fda5af 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -642,7 +642,7 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
 
 }
 
-int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
+static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
 {
 	int status = 0;
 	/* wait for reg to come ready */
@@ -832,7 +832,7 @@ end:
 }
 
 /* Get the next large buffer. */
-struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
+static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 {
 	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
 	rx_ring->lbq_curr_idx++;
@@ -843,7 +843,7 @@ struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
 }
 
 /* Get the next small buffer. */
-struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
+static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
 {
 	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
 	rx_ring->sbq_curr_idx++;
@@ -1166,7 +1166,7 @@ map_error:
 	return NETDEV_TX_BUSY;
 }
 
-void ql_realign_skb(struct sk_buff *skb, int len)
+static void ql_realign_skb(struct sk_buff *skb, int len)
 {
 	void *temp_addr = skb->data;
 
@@ -2069,7 +2069,7 @@ err:
 	return -ENOMEM;
 }
 
-void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int i;
 	struct bq_desc *lbq_desc;
@@ -2132,7 +2132,7 @@ mem_error:
 	return -ENOMEM;
 }
 
-void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
+static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 {
 	int i;
 	struct bq_desc *sbq_desc;
@@ -2467,7 +2467,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
 
 	/* PCI doorbell mem area + 0x00 for consumer index register */
-	rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area;
+	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
 	rx_ring->cnsmr_idx = 0;
 	rx_ring->curr_entry = rx_ring->cq_base;
 
@@ -2475,10 +2475,10 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 	rx_ring->valid_db_reg = doorbell_area + 0x04;
 
 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
-	rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18);
+	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
 
 	/* PCI doorbell mem area + 0x1c */
-	rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c);
+	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
 
 	memset((void *)cqicb, 0, sizeof(struct cqicb));
 	cqicb->msix_vect = rx_ring->irq;
@@ -2609,7 +2609,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
 	 * Assign doorbell registers for this tx_ring.
 	 */
 	/* TX PCI doorbell mem area for tx producer index */
-	tx_ring->prod_idx_db_reg = (u32 *) doorbell_area;
+	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
 	tx_ring->prod_idx = 0;
 	/* TX PCI doorbell mem area + 0x04 */
 	tx_ring->valid_db_reg = doorbell_area + 0x04;
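The last three hunks keep the `__iomem` address-space annotation when individual doorbell registers are carved out of the mapped PCI doorbell area; with a plain `(u32 *)` cast, sparse can no longer flag a direct dereference of what is really MMIO. A hedged sketch of the general pattern (the structure and names here are illustrative, not the qlge definitions):

```c
#include <linux/types.h>
#include <linux/io.h>

/*
 * Hypothetical sketch of the pattern the patch preserves: doorbell
 * registers live in ioremap()ed PCI memory, so the pointers must keep
 * the __iomem annotation and be accessed with readl()/writel(),
 * never dereferenced directly.
 */
struct example_ring {
	u32 __iomem *prod_idx_db_reg;	/* producer index doorbell */
	u16 prod_idx;
};

static void example_ring_doorbell(struct example_ring *ring,
				  void __iomem *doorbell_area)
{
	/*
	 * Keep __iomem in the cast; "(u32 *)" would drop the address
	 * space and make sparse complain at the writel() below.
	 */
	ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
	writel(ring->prod_idx, ring->prod_idx_db_reg);
}
```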
@@ -3520,6 +3520,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 {
 	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
 	struct sockaddr *addr = p;
+	int ret = 0;
 
 	if (netif_running(ndev))
 		return -EBUSY;
@@ -3532,11 +3533,11 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
 	if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
 			MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
 		QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
-		return -1;
+		ret = -1;
 	}
 	spin_unlock(&qdev->hw_lock);
 
-	return 0;
+	return ret;
 }
 
 static void qlge_tx_timeout(struct net_device *ndev)
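This is the one real bug in the list: the error path used to `return -1` while `qdev->hw_lock` was still held, leaving the spinlock locked forever. The fix records the failure in `ret` and falls through to the single `spin_unlock()`. A minimal sketch of that single-exit pattern, with hypothetical names:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/*
 * Hypothetical illustration of the fix above: record the error and
 * fall through to the one unlock site instead of returning with the
 * lock held.
 */
static int example_update(int (*hw_op)(void))
{
	int ret = 0;

	spin_lock(&example_lock);
	if (hw_op()) {
		/* Do NOT "return -1" here: that would leak the lock. */
		ret = -1;
	}
	spin_unlock(&example_lock);

	return ret;
}
```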
@@ -3586,7 +3587,7 @@ static void ql_release_all(struct pci_dev *pdev)
 		qdev->q_workqueue = NULL;
 	}
 	if (qdev->reg_base)
-		iounmap((void *)qdev->reg_base);
+		iounmap(qdev->reg_base);
 	if (qdev->doorbell_area)
 		iounmap(qdev->doorbell_area);
 	pci_release_regions(pdev);
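The final hunk is the same `__iomem` theme in reverse: `iounmap()` already takes a `void __iomem *`, so the `(void *)` cast only strips the annotation sparse is tracking, and the mapped pointer can be passed through unchanged. A hedged sketch of the map/unmap pairing, with hypothetical helper names:

```c
#include <linux/io.h>
#include <linux/pci.h>

/*
 * Hypothetical helpers, not qlge code: the __iomem annotation follows
 * the pointer from ioremap() all the way to iounmap(), no casts needed.
 */
static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
	return ioremap(pci_resource_start(pdev, bar),
		       pci_resource_len(pdev, bar));
}

static void example_unmap_bar(void __iomem *base)
{
	if (base)
		iounmap(base);	/* iounmap() takes void __iomem * */
}
```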