diff options
author | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2010-04-12 10:32:14 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2010-04-13 05:54:19 -0400 |
commit | 64b9b41de87e86129d055a584ee9e16bce97573a (patch) | |
tree | b636305ef64cb1b3e96a6043606f66973f34f36b /drivers/net/qlge | |
parent | 56e3b9df1376fa865ae929909b02f6840207520f (diff) |
qlge: use the DMA state API instead of the pci equivalents
This replaces the PCI DMA state API (include/linux/pci-dma.h) with the
DMA equivalents since the PCI DMA state API will be obsolete.
No functional change.
For further information about the background:
http://marc.info/?l=linux-netdev&m=127037540020276&w=2
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r-- | drivers/net/qlge/qlge.h | 8 | ||||
-rw-r--r-- | drivers/net/qlge/qlge_main.c | 58 |
2 files changed, 33 insertions, 33 deletions
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 8b742b639ceb..20624ba44a37 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h | |||
@@ -1344,8 +1344,8 @@ struct oal { | |||
1344 | }; | 1344 | }; |
1345 | 1345 | ||
1346 | struct map_list { | 1346 | struct map_list { |
1347 | DECLARE_PCI_UNMAP_ADDR(mapaddr); | 1347 | DEFINE_DMA_UNMAP_ADDR(mapaddr); |
1348 | DECLARE_PCI_UNMAP_LEN(maplen); | 1348 | DEFINE_DMA_UNMAP_LEN(maplen); |
1349 | }; | 1349 | }; |
1350 | 1350 | ||
1351 | struct tx_ring_desc { | 1351 | struct tx_ring_desc { |
@@ -1373,8 +1373,8 @@ struct bq_desc { | |||
1373 | } p; | 1373 | } p; |
1374 | __le64 *addr; | 1374 | __le64 *addr; |
1375 | u32 index; | 1375 | u32 index; |
1376 | DECLARE_PCI_UNMAP_ADDR(mapaddr); | 1376 | DEFINE_DMA_UNMAP_ADDR(mapaddr); |
1377 | DECLARE_PCI_UNMAP_LEN(maplen); | 1377 | DEFINE_DMA_UNMAP_LEN(maplen); |
1378 | }; | 1378 | }; |
1379 | 1379 | ||
1380 | #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) | 1380 | #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) |
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 76df96813a7b..fa4b24c49f42 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, | |||
1057 | struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); | 1057 | struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); |
1058 | 1058 | ||
1059 | pci_dma_sync_single_for_cpu(qdev->pdev, | 1059 | pci_dma_sync_single_for_cpu(qdev->pdev, |
1060 | pci_unmap_addr(lbq_desc, mapaddr), | 1060 | dma_unmap_addr(lbq_desc, mapaddr), |
1061 | rx_ring->lbq_buf_size, | 1061 | rx_ring->lbq_buf_size, |
1062 | PCI_DMA_FROMDEVICE); | 1062 | PCI_DMA_FROMDEVICE); |
1063 | 1063 | ||
@@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |||
1170 | 1170 | ||
1171 | map = lbq_desc->p.pg_chunk.map + | 1171 | map = lbq_desc->p.pg_chunk.map + |
1172 | lbq_desc->p.pg_chunk.offset; | 1172 | lbq_desc->p.pg_chunk.offset; |
1173 | pci_unmap_addr_set(lbq_desc, mapaddr, map); | 1173 | dma_unmap_addr_set(lbq_desc, mapaddr, map); |
1174 | pci_unmap_len_set(lbq_desc, maplen, | 1174 | dma_unmap_len_set(lbq_desc, maplen, |
1175 | rx_ring->lbq_buf_size); | 1175 | rx_ring->lbq_buf_size); |
1176 | *lbq_desc->addr = cpu_to_le64(map); | 1176 | *lbq_desc->addr = cpu_to_le64(map); |
1177 | 1177 | ||
@@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |||
1241 | sbq_desc->p.skb = NULL; | 1241 | sbq_desc->p.skb = NULL; |
1242 | return; | 1242 | return; |
1243 | } | 1243 | } |
1244 | pci_unmap_addr_set(sbq_desc, mapaddr, map); | 1244 | dma_unmap_addr_set(sbq_desc, mapaddr, map); |
1245 | pci_unmap_len_set(sbq_desc, maplen, | 1245 | dma_unmap_len_set(sbq_desc, maplen, |
1246 | rx_ring->sbq_buf_size); | 1246 | rx_ring->sbq_buf_size); |
1247 | *sbq_desc->addr = cpu_to_le64(map); | 1247 | *sbq_desc->addr = cpu_to_le64(map); |
1248 | } | 1248 | } |
@@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev, | |||
1298 | "unmapping OAL area.\n"); | 1298 | "unmapping OAL area.\n"); |
1299 | } | 1299 | } |
1300 | pci_unmap_single(qdev->pdev, | 1300 | pci_unmap_single(qdev->pdev, |
1301 | pci_unmap_addr(&tx_ring_desc->map[i], | 1301 | dma_unmap_addr(&tx_ring_desc->map[i], |
1302 | mapaddr), | 1302 | mapaddr), |
1303 | pci_unmap_len(&tx_ring_desc->map[i], | 1303 | dma_unmap_len(&tx_ring_desc->map[i], |
1304 | maplen), | 1304 | maplen), |
1305 | PCI_DMA_TODEVICE); | 1305 | PCI_DMA_TODEVICE); |
1306 | } else { | 1306 | } else { |
1307 | netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, | 1307 | netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, |
1308 | "unmapping frag %d.\n", i); | 1308 | "unmapping frag %d.\n", i); |
1309 | pci_unmap_page(qdev->pdev, | 1309 | pci_unmap_page(qdev->pdev, |
1310 | pci_unmap_addr(&tx_ring_desc->map[i], | 1310 | dma_unmap_addr(&tx_ring_desc->map[i], |
1311 | mapaddr), | 1311 | mapaddr), |
1312 | pci_unmap_len(&tx_ring_desc->map[i], | 1312 | dma_unmap_len(&tx_ring_desc->map[i], |
1313 | maplen), PCI_DMA_TODEVICE); | 1313 | maplen), PCI_DMA_TODEVICE); |
1314 | } | 1314 | } |
1315 | } | 1315 | } |
@@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev, | |||
1348 | 1348 | ||
1349 | tbd->len = cpu_to_le32(len); | 1349 | tbd->len = cpu_to_le32(len); |
1350 | tbd->addr = cpu_to_le64(map); | 1350 | tbd->addr = cpu_to_le64(map); |
1351 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); | 1351 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); |
1352 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); | 1352 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); |
1353 | map_idx++; | 1353 | map_idx++; |
1354 | 1354 | ||
1355 | /* | 1355 | /* |
@@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev, | |||
1402 | tbd->len = | 1402 | tbd->len = |
1403 | cpu_to_le32((sizeof(struct tx_buf_desc) * | 1403 | cpu_to_le32((sizeof(struct tx_buf_desc) * |
1404 | (frag_cnt - frag_idx)) | TX_DESC_C); | 1404 | (frag_cnt - frag_idx)) | TX_DESC_C); |
1405 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, | 1405 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, |
1406 | map); | 1406 | map); |
1407 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, | 1407 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, |
1408 | sizeof(struct oal)); | 1408 | sizeof(struct oal)); |
1409 | tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; | 1409 | tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; |
1410 | map_idx++; | 1410 | map_idx++; |
@@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev, | |||
1425 | 1425 | ||
1426 | tbd->addr = cpu_to_le64(map); | 1426 | tbd->addr = cpu_to_le64(map); |
1427 | tbd->len = cpu_to_le32(frag->size); | 1427 | tbd->len = cpu_to_le32(frag->size); |
1428 | pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); | 1428 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); |
1429 | pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, | 1429 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, |
1430 | frag->size); | 1430 | frag->size); |
1431 | 1431 | ||
1432 | } | 1432 | } |
@@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1742 | */ | 1742 | */ |
1743 | sbq_desc = ql_get_curr_sbuf(rx_ring); | 1743 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1744 | pci_unmap_single(qdev->pdev, | 1744 | pci_unmap_single(qdev->pdev, |
1745 | pci_unmap_addr(sbq_desc, mapaddr), | 1745 | dma_unmap_addr(sbq_desc, mapaddr), |
1746 | pci_unmap_len(sbq_desc, maplen), | 1746 | dma_unmap_len(sbq_desc, maplen), |
1747 | PCI_DMA_FROMDEVICE); | 1747 | PCI_DMA_FROMDEVICE); |
1748 | skb = sbq_desc->p.skb; | 1748 | skb = sbq_desc->p.skb; |
1749 | ql_realign_skb(skb, hdr_len); | 1749 | ql_realign_skb(skb, hdr_len); |
@@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1774 | */ | 1774 | */ |
1775 | sbq_desc = ql_get_curr_sbuf(rx_ring); | 1775 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1776 | pci_dma_sync_single_for_cpu(qdev->pdev, | 1776 | pci_dma_sync_single_for_cpu(qdev->pdev, |
1777 | pci_unmap_addr | 1777 | dma_unmap_addr |
1778 | (sbq_desc, mapaddr), | 1778 | (sbq_desc, mapaddr), |
1779 | pci_unmap_len | 1779 | dma_unmap_len |
1780 | (sbq_desc, maplen), | 1780 | (sbq_desc, maplen), |
1781 | PCI_DMA_FROMDEVICE); | 1781 | PCI_DMA_FROMDEVICE); |
1782 | memcpy(skb_put(skb, length), | 1782 | memcpy(skb_put(skb, length), |
1783 | sbq_desc->p.skb->data, length); | 1783 | sbq_desc->p.skb->data, length); |
1784 | pci_dma_sync_single_for_device(qdev->pdev, | 1784 | pci_dma_sync_single_for_device(qdev->pdev, |
1785 | pci_unmap_addr | 1785 | dma_unmap_addr |
1786 | (sbq_desc, | 1786 | (sbq_desc, |
1787 | mapaddr), | 1787 | mapaddr), |
1788 | pci_unmap_len | 1788 | dma_unmap_len |
1789 | (sbq_desc, | 1789 | (sbq_desc, |
1790 | maplen), | 1790 | maplen), |
1791 | PCI_DMA_FROMDEVICE); | 1791 | PCI_DMA_FROMDEVICE); |
@@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1798 | ql_realign_skb(skb, length); | 1798 | ql_realign_skb(skb, length); |
1799 | skb_put(skb, length); | 1799 | skb_put(skb, length); |
1800 | pci_unmap_single(qdev->pdev, | 1800 | pci_unmap_single(qdev->pdev, |
1801 | pci_unmap_addr(sbq_desc, | 1801 | dma_unmap_addr(sbq_desc, |
1802 | mapaddr), | 1802 | mapaddr), |
1803 | pci_unmap_len(sbq_desc, | 1803 | dma_unmap_len(sbq_desc, |
1804 | maplen), | 1804 | maplen), |
1805 | PCI_DMA_FROMDEVICE); | 1805 | PCI_DMA_FROMDEVICE); |
1806 | sbq_desc->p.skb = NULL; | 1806 | sbq_desc->p.skb = NULL; |
@@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1839 | return NULL; | 1839 | return NULL; |
1840 | } | 1840 | } |
1841 | pci_unmap_page(qdev->pdev, | 1841 | pci_unmap_page(qdev->pdev, |
1842 | pci_unmap_addr(lbq_desc, | 1842 | dma_unmap_addr(lbq_desc, |
1843 | mapaddr), | 1843 | mapaddr), |
1844 | pci_unmap_len(lbq_desc, maplen), | 1844 | dma_unmap_len(lbq_desc, maplen), |
1845 | PCI_DMA_FROMDEVICE); | 1845 | PCI_DMA_FROMDEVICE); |
1846 | skb_reserve(skb, NET_IP_ALIGN); | 1846 | skb_reserve(skb, NET_IP_ALIGN); |
1847 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, | 1847 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
@@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |||
1874 | int size, i = 0; | 1874 | int size, i = 0; |
1875 | sbq_desc = ql_get_curr_sbuf(rx_ring); | 1875 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1876 | pci_unmap_single(qdev->pdev, | 1876 | pci_unmap_single(qdev->pdev, |
1877 | pci_unmap_addr(sbq_desc, mapaddr), | 1877 | dma_unmap_addr(sbq_desc, mapaddr), |
1878 | pci_unmap_len(sbq_desc, maplen), | 1878 | dma_unmap_len(sbq_desc, maplen), |
1879 | PCI_DMA_FROMDEVICE); | 1879 | PCI_DMA_FROMDEVICE); |
1880 | if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { | 1880 | if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { |
1881 | /* | 1881 | /* |
@@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring | |||
2737 | } | 2737 | } |
2738 | if (sbq_desc->p.skb) { | 2738 | if (sbq_desc->p.skb) { |
2739 | pci_unmap_single(qdev->pdev, | 2739 | pci_unmap_single(qdev->pdev, |
2740 | pci_unmap_addr(sbq_desc, mapaddr), | 2740 | dma_unmap_addr(sbq_desc, mapaddr), |
2741 | pci_unmap_len(sbq_desc, maplen), | 2741 | dma_unmap_len(sbq_desc, maplen), |
2742 | PCI_DMA_FROMDEVICE); | 2742 | PCI_DMA_FROMDEVICE); |
2743 | dev_kfree_skb(sbq_desc->p.skb); | 2743 | dev_kfree_skb(sbq_desc->p.skb); |
2744 | sbq_desc->p.skb = NULL; | 2744 | sbq_desc->p.skb = NULL; |