diff options
Diffstat (limited to 'drivers/net/qlge/qlge_main.c')
-rw-r--r-- | drivers/net/qlge/qlge_main.c | 64 |
1 file changed, 32 insertions, 32 deletions
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index fd34f266c0a8..fa4b24c49f42 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, | |||
1057 | struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); | 1057 | struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); |
1058 | 1058 | ||
1059 | pci_dma_sync_single_for_cpu(qdev->pdev, | 1059 | pci_dma_sync_single_for_cpu(qdev->pdev, |
1060 | pci_unmap_addr(lbq_desc, mapaddr), | 1060 | dma_unmap_addr(lbq_desc, mapaddr), |
1061 | rx_ring->lbq_buf_size, | 1061 | rx_ring->lbq_buf_size, |
1062 | PCI_DMA_FROMDEVICE); | 1062 | PCI_DMA_FROMDEVICE); |
1063 | 1063 | ||
@@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |||
1170 | 1170 | ||
1171 | map = lbq_desc->p.pg_chunk.map + | 1171 | map = lbq_desc->p.pg_chunk.map + |
1172 | lbq_desc->p.pg_chunk.offset; | 1172 | lbq_desc->p.pg_chunk.offset; |
1173 | pci_unmap_addr_set(lbq_desc, mapaddr, map); | 1173 | dma_unmap_addr_set(lbq_desc, mapaddr, map); |
1174 | pci_unmap_len_set(lbq_desc, maplen, | 1174 | dma_unmap_len_set(lbq_desc, maplen, |
1175 | rx_ring->lbq_buf_size); | 1175 | rx_ring->lbq_buf_size); |
1176 | *lbq_desc->addr = cpu_to_le64(map); | 1176 | *lbq_desc->addr = cpu_to_le64(map); |
1177 | 1177 | ||
@@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |||
1241 | sbq_desc->p.skb = NULL; | 1241 | sbq_desc->p.skb = NULL; |
1242 | return; | 1242 | return; |
1243 | } | 1243 | } |
1244 | pci_unmap_addr_set(sbq_desc, mapaddr, map); | 1244 | dma_unmap_addr_set(sbq_desc, mapaddr, map); |
1245 | pci_unmap_len_set(sbq_desc, maplen, | 1245 | dma_unmap_len_set(sbq_desc, maplen, |
1246 | rx_ring->sbq_buf_size); | 1246 | rx_ring->sbq_buf_size); |
1247 | *sbq_desc->addr = cpu_to_le64(map); | 1247 | *sbq_desc->addr = cpu_to_le64(map); |
1248 | } | 1248 | } |
@@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev, | |||
1298 | "unmapping OAL area.\n"); | 1298 | "unmapping OAL area.\n"); |
1299 | } | 1299 | } |
1300 | pci_unmap_single(qdev->pdev, | 1300 | pci_unmap_single(qdev->pdev, |
1301 | pci_unmap_addr(&tx_ring_desc->map[i], | 1301 | dma_unmap_addr(&tx_ring_desc->map[i], |
1302 | mapaddr), | 1302 | mapaddr), |
1303 | pci_unmap_len(&tx_ring_desc->map[i], | 1303 | dma_unmap_len(&tx_ring_desc->map[i], |
1304 | maplen), | 1304 | maplen), |
1305 | PCI_DMA_TODEVICE); | 1305 | PCI_DMA_TODEVICE); |
1306 | } else { | 1306 | } else { |
1307 | netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, | 1307 | netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, |
1308 | "unmapping frag %d.\n", i); | 1308 | "unmapping frag %d.\n", i); |
1309 | pci_unmap_page(qdev->pdev, | 1309 | pci_unmap_page(qdev->pdev, |
1310 | pci_unmap_addr(&tx_ring_desc->map[i], | 1310 | dma_unmap_addr(&tx_ring_desc->map[i], |
1311 | mapaddr), | 1311 | mapaddr), |
1312 | pci_unmap_len(&tx_ring_desc->map[i], | 1312 | dma_unmap_len(&tx_ring_desc->map[i], |
1313 | maplen), PCI_DMA_TODEVICE); | 1313 | maplen), PCI_DMA_TODEVICE); |
1314 | } | 1314 | } |
1315 | } | 1315 | } |
@@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev, | |||
1348 | 1348 | ||
1349 | tbd->len = cpu_to_le32(len); | 1349 | tbd->len = cpu_to_le32(len); |
1350 | tbd->addr = cpu_to_le64(map); | 1350 | tbd->addr = cpu_to_le64(map); |
1351 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); | 1351 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); |
1352 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); | 1352 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); |
1353 | map_idx++; | 1353 | map_idx++; |
1354 | 1354 | ||
1355 | /* | 1355 | /* |
@@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev, | |||
1402 | tbd->len = | 1402 | tbd->len = |
1403 | cpu_to_le32((sizeof(struct tx_buf_desc) * | 1403 | cpu_to_le32((sizeof(struct tx_buf_desc) * |
1404 | (frag_cnt - frag_idx)) | TX_DESC_C); | 1404 | (frag_cnt - frag_idx)) | TX_DESC_C); |
1405 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, | 1405 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, |
1406 | map); | 1406 | map); |
1407 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, | 1407 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, |
1408 | sizeof(struct oal)); | 1408 | sizeof(struct oal)); |
1409 | tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; | 1409 | tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; |
1410 | map_idx++; | 1410 | map_idx++; |
@@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev, | |||
1425 | 1425 | ||
1426 | tbd->addr = cpu_to_le64(map); | 1426 | tbd->addr = cpu_to_le64(map); |
1427 | tbd->len = cpu_to_le32(frag->size); | 1427 | tbd->len = cpu_to_le32(frag->size); |
1428 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); | 1428 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); |
1429 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, | 1429 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, |
1430 | frag->size); | 1430 | frag->size); |
1431 | 1431 | ||
1432 | } | 1432 | } |
@@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1742 | */ | 1742 | */ |
1743 | sbq_desc = ql_get_curr_sbuf(rx_ring); | 1743 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1744 | pci_unmap_single(qdev->pdev, | 1744 | pci_unmap_single(qdev->pdev, |
1745 | pci_unmap_addr(sbq_desc, mapaddr), | 1745 | dma_unmap_addr(sbq_desc, mapaddr), |
1746 | pci_unmap_len(sbq_desc, maplen), | 1746 | dma_unmap_len(sbq_desc, maplen), |
1747 | PCI_DMA_FROMDEVICE); | 1747 | PCI_DMA_FROMDEVICE); |
1748 | skb = sbq_desc->p.skb; | 1748 | skb = sbq_desc->p.skb; |
1749 | ql_realign_skb(skb, hdr_len); | 1749 | ql_realign_skb(skb, hdr_len); |
@@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1774 | */ | 1774 | */ |
1775 | sbq_desc = ql_get_curr_sbuf(rx_ring); | 1775 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1776 | pci_dma_sync_single_for_cpu(qdev->pdev, | 1776 | pci_dma_sync_single_for_cpu(qdev->pdev, |
1777 | pci_unmap_addr | 1777 | dma_unmap_addr |
1778 | (sbq_desc, mapaddr), | 1778 | (sbq_desc, mapaddr), |
1779 | pci_unmap_len | 1779 | dma_unmap_len |
1780 | (sbq_desc, maplen), | 1780 | (sbq_desc, maplen), |
1781 | PCI_DMA_FROMDEVICE); | 1781 | PCI_DMA_FROMDEVICE); |
1782 | memcpy(skb_put(skb, length), | 1782 | memcpy(skb_put(skb, length), |
1783 | sbq_desc->p.skb->data, length); | 1783 | sbq_desc->p.skb->data, length); |
1784 | pci_dma_sync_single_for_device(qdev->pdev, | 1784 | pci_dma_sync_single_for_device(qdev->pdev, |
1785 | pci_unmap_addr | 1785 | dma_unmap_addr |
1786 | (sbq_desc, | 1786 | (sbq_desc, |
1787 | mapaddr), | 1787 | mapaddr), |
1788 | pci_unmap_len | 1788 | dma_unmap_len |
1789 | (sbq_desc, | 1789 | (sbq_desc, |
1790 | maplen), | 1790 | maplen), |
1791 | PCI_DMA_FROMDEVICE); | 1791 | PCI_DMA_FROMDEVICE); |
@@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1798 | ql_realign_skb(skb, length); | 1798 | ql_realign_skb(skb, length); |
1799 | skb_put(skb, length); | 1799 | skb_put(skb, length); |
1800 | pci_unmap_single(qdev->pdev, | 1800 | pci_unmap_single(qdev->pdev, |
1801 | pci_unmap_addr(sbq_desc, | 1801 | dma_unmap_addr(sbq_desc, |
1802 | mapaddr), | 1802 | mapaddr), |
1803 | pci_unmap_len(sbq_desc, | 1803 | dma_unmap_len(sbq_desc, |
1804 | maplen), | 1804 | maplen), |
1805 | PCI_DMA_FROMDEVICE); | 1805 | PCI_DMA_FROMDEVICE); |
1806 | sbq_desc->p.skb = NULL; | 1806 | sbq_desc->p.skb = NULL; |
@@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1839 | return NULL; | 1839 | return NULL; |
1840 | } | 1840 | } |
1841 | pci_unmap_page(qdev->pdev, | 1841 | pci_unmap_page(qdev->pdev, |
1842 | pci_unmap_addr(lbq_desc, | 1842 | dma_unmap_addr(lbq_desc, |
1843 | mapaddr), | 1843 | mapaddr), |
1844 | pci_unmap_len(lbq_desc, maplen), | 1844 | dma_unmap_len(lbq_desc, maplen), |
1845 | PCI_DMA_FROMDEVICE); | 1845 | PCI_DMA_FROMDEVICE); |
1846 | skb_reserve(skb, NET_IP_ALIGN); | 1846 | skb_reserve(skb, NET_IP_ALIGN); |
1847 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, | 1847 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
@@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1874 | int size, i = 0; | 1874 | int size, i = 0; |
1875 | sbq_desc = ql_get_curr_sbuf(rx_ring); | 1875 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1876 | pci_unmap_single(qdev->pdev, | 1876 | pci_unmap_single(qdev->pdev, |
1877 | pci_unmap_addr(sbq_desc, mapaddr), | 1877 | dma_unmap_addr(sbq_desc, mapaddr), |
1878 | pci_unmap_len(sbq_desc, maplen), | 1878 | dma_unmap_len(sbq_desc, maplen), |
1879 | PCI_DMA_FROMDEVICE); | 1879 | PCI_DMA_FROMDEVICE); |
1880 | if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { | 1880 | if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { |
1881 | /* | 1881 | /* |
@@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring | |||
2737 | } | 2737 | } |
2738 | if (sbq_desc->p.skb) { | 2738 | if (sbq_desc->p.skb) { |
2739 | pci_unmap_single(qdev->pdev, | 2739 | pci_unmap_single(qdev->pdev, |
2740 | pci_unmap_addr(sbq_desc, mapaddr), | 2740 | dma_unmap_addr(sbq_desc, mapaddr), |
2741 | pci_unmap_len(sbq_desc, maplen), | 2741 | dma_unmap_len(sbq_desc, maplen), |
2742 | PCI_DMA_FROMDEVICE); | 2742 | PCI_DMA_FROMDEVICE); |
2743 | dev_kfree_skb(sbq_desc->p.skb); | 2743 | dev_kfree_skb(sbq_desc->p.skb); |
2744 | sbq_desc->p.skb = NULL; | 2744 | sbq_desc->p.skb = NULL; |
@@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device | |||
4207 | static void qlge_set_multicast_list(struct net_device *ndev) | 4207 | static void qlge_set_multicast_list(struct net_device *ndev) |
4208 | { | 4208 | { |
4209 | struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); | 4209 | struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); |
4210 | struct dev_mc_list *mc_ptr; | 4210 | struct netdev_hw_addr *ha; |
4211 | int i, status; | 4211 | int i, status; |
4212 | 4212 | ||
4213 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); | 4213 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); |
@@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev) | |||
4271 | if (status) | 4271 | if (status) |
4272 | goto exit; | 4272 | goto exit; |
4273 | i = 0; | 4273 | i = 0; |
4274 | netdev_for_each_mc_addr(mc_ptr, ndev) { | 4274 | netdev_for_each_mc_addr(ha, ndev) { |
4275 | if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, | 4275 | if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, |
4276 | MAC_ADDR_TYPE_MULTI_MAC, i)) { | 4276 | MAC_ADDR_TYPE_MULTI_MAC, i)) { |
4277 | netif_err(qdev, hw, qdev->ndev, | 4277 | netif_err(qdev, hw, qdev->ndev, |
4278 | "Failed to loadmulticast address.\n"); | 4278 | "Failed to loadmulticast address.\n"); |