author		FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2010-04-12 10:32:13 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-13 05:54:17 -0400
commit		87196eb740f3f73105a5c13bbf7651b4b60daec1
tree		e2d8489c7096276d09a47829be601414be5991e7
parent		4e5e4f0d65975ce092202cce48b42571bf84591e

qla3xxx: use the DMA state API instead of the pci equivalents

This replaces the PCI DMA state API (include/linux/pci-dma.h) with the
DMA equivalents, since the PCI DMA state API will be obsolete.
No functional change.
For further information about the background:
http://marc.info/?l=linux-netdev&m=127037540020276&w=2
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
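
Every hunk below makes the same substitution: the pci_unmap_* state macros become their dma_unmap_* counterparts from include/linux/dma-mapping.h, while the mapping/unmapping calls themselves are untouched. A minimal sketch of the declaration and write side of the pattern, assuming an illustrative control block (my_buf_cb and ql_save_mapping are hypothetical names; the mapaddr/maplen fields mirror the driver's):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative receive-buffer control block; the driver's own structs
 * (ql_rcv_buf_cb, map_list) carry the same two pieces of unmap state. */
struct my_buf_cb {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);	/* was DECLARE_PCI_UNMAP_ADDR(mapaddr) */
	DEFINE_DMA_UNMAP_LEN(maplen);	/* was DECLARE_PCI_UNMAP_LEN(maplen)   */
};

/* Write side: record a streaming mapping so it can be unmapped later. */
static void ql_save_mapping(struct my_buf_cb *cb, dma_addr_t map, size_t len)
{
	dma_unmap_addr_set(cb, mapaddr, map);	/* was pci_unmap_addr_set() */
	dma_unmap_len_set(cb, maplen, len);	/* was pci_unmap_len_set()  */
}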
-rw-r--r--	drivers/net/qla3xxx.c	64
-rw-r--r--	drivers/net/qla3xxx.h	 8
2 files changed, 36 insertions, 36 deletions
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index fc11ad3f64b0..01a6ca303a17 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -343,8 +343,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 			    cpu_to_le32(LS_64BITS(map));
 			lrg_buf_cb->buf_phy_addr_high =
 			    cpu_to_le32(MS_64BITS(map));
-			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
-			pci_unmap_len_set(lrg_buf_cb, maplen,
+			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+			dma_unmap_len_set(lrg_buf_cb, maplen,
 					   qdev->lrg_buffer_len -
 					   QL_HEADER_SPACE);
 		}
@@ -1924,8 +1924,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
 				    cpu_to_le32(LS_64BITS(map));
 				lrg_buf_cb->buf_phy_addr_high =
 				    cpu_to_le32(MS_64BITS(map));
-				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
-				pci_unmap_len_set(lrg_buf_cb, maplen,
+				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+				dma_unmap_len_set(lrg_buf_cb, maplen,
 						   qdev->lrg_buffer_len -
 						   QL_HEADER_SPACE);
 				--qdev->lrg_buf_skb_check;
@@ -2041,16 +2041,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 	}
 
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
-			 pci_unmap_len(&tx_cb->map[0], maplen),
+			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
+			 dma_unmap_len(&tx_cb->map[0], maplen),
 			 PCI_DMA_TODEVICE);
 	tx_cb->seg_count--;
 	if (tx_cb->seg_count) {
 		for (i = 1; i < tx_cb->seg_count; i++) {
 			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(&tx_cb->map[i],
+				       dma_unmap_addr(&tx_cb->map[i],
 						      mapaddr),
-				       pci_unmap_len(&tx_cb->map[i], maplen),
+				       dma_unmap_len(&tx_cb->map[i], maplen),
 				       PCI_DMA_TODEVICE);
 		}
 	}
@@ -2119,8 +2119,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 
 	skb_put(skb, length);
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
-			 pci_unmap_len(lrg_buf_cb2, maplen),
+			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
+			 dma_unmap_len(lrg_buf_cb2, maplen),
 			 PCI_DMA_FROMDEVICE);
 	prefetch(skb->data);
 	skb->ip_summed = CHECKSUM_NONE;
@@ -2165,8 +2165,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 
 	skb_put(skb2, length);	/* Just the second buffer length here. */
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
-			 pci_unmap_len(lrg_buf_cb2, maplen),
+			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
+			 dma_unmap_len(lrg_buf_cb2, maplen),
 			 PCI_DMA_FROMDEVICE);
 	prefetch(skb2->data);
 
@@ -2454,8 +2454,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
 	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
 	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
 	oal_entry->len = cpu_to_le32(len);
-	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
-	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
 	seg++;
 
 	if (seg_cnt == 1) {
@@ -2488,9 +2488,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
 				oal_entry->len =
 				    cpu_to_le32(sizeof(struct oal) |
 						OAL_CONT_ENTRY);
-				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+				dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
 						   map);
-				pci_unmap_len_set(&tx_cb->map[seg], maplen,
+				dma_unmap_len_set(&tx_cb->map[seg], maplen,
 						   sizeof(struct oal));
 				oal_entry = (struct oal_entry *)oal;
 				oal++;
@@ -2512,8 +2512,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
 			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
 			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
 			oal_entry->len = cpu_to_le32(frag->size);
-			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
-			pci_unmap_len_set(&tx_cb->map[seg], maplen,
+			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+			dma_unmap_len_set(&tx_cb->map[seg], maplen,
 					  frag->size);
 		}
 		/* Terminate the last segment. */
@@ -2539,22 +2539,22 @@ map_error:
 		    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
 		    (seg == 17 && seg_cnt > 18)) {
 			pci_unmap_single(qdev->pdev,
-					 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
-					 pci_unmap_len(&tx_cb->map[seg], maplen),
+					 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+					 dma_unmap_len(&tx_cb->map[seg], maplen),
 					 PCI_DMA_TODEVICE);
 			oal++;
 			seg++;
 		}
 
 		pci_unmap_page(qdev->pdev,
-			       pci_unmap_addr(&tx_cb->map[seg], mapaddr),
-			       pci_unmap_len(&tx_cb->map[seg], maplen),
+			       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+			       dma_unmap_len(&tx_cb->map[seg], maplen),
 			       PCI_DMA_TODEVICE);
 	}
 
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
-			 pci_unmap_addr(&tx_cb->map[0], maplen),
+			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
+			 dma_unmap_addr(&tx_cb->map[0], maplen),
 			 PCI_DMA_TODEVICE);
 
 	return NETDEV_TX_BUSY;
@@ -2841,8 +2841,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
 		if (lrg_buf_cb->skb) {
 			dev_kfree_skb(lrg_buf_cb->skb);
 			pci_unmap_single(qdev->pdev,
-					 pci_unmap_addr(lrg_buf_cb, mapaddr),
-					 pci_unmap_len(lrg_buf_cb, maplen),
+					 dma_unmap_addr(lrg_buf_cb, mapaddr),
+					 dma_unmap_len(lrg_buf_cb, maplen),
 					 PCI_DMA_FROMDEVICE);
 			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
 		} else {
@@ -2912,8 +2912,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 			return -ENOMEM;
 		}
 
-		pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
-		pci_unmap_len_set(lrg_buf_cb, maplen,
+		dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+		dma_unmap_len_set(lrg_buf_cb, maplen,
 				  qdev->lrg_buffer_len -
 				  QL_HEADER_SPACE);
 		lrg_buf_cb->buf_phy_addr_low =
@@ -3793,13 +3793,13 @@ static void ql_reset_work(struct work_struct *work)
 					   "%s: Freeing lost SKB.\n",
 					   qdev->ndev->name);
 				pci_unmap_single(qdev->pdev,
-					 pci_unmap_addr(&tx_cb->map[0], mapaddr),
-					 pci_unmap_len(&tx_cb->map[0], maplen),
+					 dma_unmap_addr(&tx_cb->map[0], mapaddr),
+					 dma_unmap_len(&tx_cb->map[0], maplen),
 					 PCI_DMA_TODEVICE);
 				for(j=1;j<tx_cb->seg_count;j++) {
 					pci_unmap_page(qdev->pdev,
-					       pci_unmap_addr(&tx_cb->map[j],mapaddr),
-					       pci_unmap_len(&tx_cb->map[j],maplen),
+					       dma_unmap_addr(&tx_cb->map[j],mapaddr),
+					       dma_unmap_len(&tx_cb->map[j],maplen),
 					       PCI_DMA_TODEVICE);
 				}
 				dev_kfree_skb(tx_cb->skb);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 7113e71b15a1..3362a661248c 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -998,8 +998,8 @@ enum link_state_t {
 struct ql_rcv_buf_cb {
 	struct ql_rcv_buf_cb *next;
 	struct sk_buff *skb;
-	DECLARE_PCI_UNMAP_ADDR(mapaddr);
-	DECLARE_PCI_UNMAP_LEN(maplen);
+	DEFINE_DMA_UNMAP_ADDR(mapaddr);
+	DEFINE_DMA_UNMAP_LEN(maplen);
 	__le32 buf_phy_addr_low;
 	__le32 buf_phy_addr_high;
 	int index;
@@ -1029,8 +1029,8 @@ struct oal {
 };
 
 struct map_list {
-	DECLARE_PCI_UNMAP_ADDR(mapaddr);
-	DECLARE_PCI_UNMAP_LEN(maplen);
+	DEFINE_DMA_UNMAP_ADDR(mapaddr);
+	DEFINE_DMA_UNMAP_LEN(maplen);
 };
 
 struct ql_tx_buf_cb {