author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2010-04-03 21:51:03 -0400
committer  David S. Miller <davem@davemloft.net>	2010-04-08 00:05:35 -0400
commit     1a98314273ad6a3dc048925cf71d9a8cee3560d8
tree       35abd828c2112f0aac2521a81214f8d1c742d9d7 /drivers
parent     1a4ccc2d460f252853dfa2fb38b4ea881916713d
bnx2x: use the DMA API instead of the pci equivalents
The generic DMA API is preferred; the pci_* DMA functions are merely wrappers around it.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Vladislav Zolotarov <vladz@broadcom.com>
Acked-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
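The conversion is mechanical: each pci_* DMA call becomes the corresponding dma_* call taking &pdev->dev, each PCI_DMA_* direction constant becomes its DMA_* counterpart, and coherent allocations gain an explicit GFP flag (pci_alloc_consistent implied GFP_ATOMIC; dma_alloc_coherent lets the caller pass GFP_KERNEL where sleeping is allowed). A minimal sketch of the before/after idiom follows; example_tx_map/example_tx_unmap, buf and len are illustrative names for this sketch, not identifiers from the driver:

	/*
	 * Sketch only: shows the idiom this patch applies throughout bnx2x.
	 */
	static int example_tx_map(struct pci_dev *pdev, void *buf, size_t len,
				  dma_addr_t *mapping)
	{
		/* was: *mapping = pci_map_single(pdev, buf, len,
		 *			         PCI_DMA_TODEVICE); */
		*mapping = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

		/* mapping can fail; dma_mapping_error() already takes the
		 * struct device, so this check is unchanged by the patch */
		if (dma_mapping_error(&pdev->dev, *mapping))
			return -ENOMEM;
		return 0;
	}

	static void example_tx_unmap(struct pci_dev *pdev, dma_addr_t mapping,
				     size_t len)
	{
		/* was: pci_unmap_single(pdev, mapping, len,
		 *			 PCI_DMA_TODEVICE); */
		dma_unmap_single(&pdev->dev, mapping, len, DMA_TO_DEVICE);
	}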
Diffstat (limited to 'drivers')

 drivers/net/bnx2x.h      |   4
 drivers/net/bnx2x_main.c | 110
 2 files changed, 58 insertions(+), 56 deletions(-)
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a68308..ae9c89ebcc8b 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -163,7 +163,7 @@ do { \
 
 struct sw_rx_bd {
 	struct sk_buff	*skb;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 struct sw_tx_bd {
@@ -176,7 +176,7 @@ struct sw_tx_bd {
 
 struct sw_rx_page {
 	struct page	*page;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
 union db_prod {
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fa9275c2ef5c..63a17d604a98 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -842,7 +842,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* unmap first bd */
 	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
 	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +872,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
 		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
-			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 		if (--nbd)
 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
@@ -1086,7 +1086,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
@@ -1115,15 +1115,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
-			       PCI_DMA_FROMDEVICE);
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
 		return -ENOMEM;
 	}
 
 	sw_buf->page = page;
-	pci_unmap_addr_set(sw_buf, mapping, mapping);
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1143,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
-				 PCI_DMA_FROMDEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
 
 	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1173,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 
-	pci_dma_sync_single_for_device(bp->pdev,
-				       pci_unmap_addr(cons_rx_buf, mapping),
-				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(cons_rx_buf, mapping),
+				   RX_COPY_THRESH, DMA_FROM_DEVICE);
 
 	prod_rx_buf->skb = cons_rx_buf->skb;
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			   pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
 	*prod_bd = *cons_bd;
 }
 
@@ -1283,9 +1283,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
-	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+				 bp->rx_buf_size, DMA_FROM_DEVICE);
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
 	fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1361,8 +1361,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 
 		/* Unmap the page as we r going to pass it to the stack */
-		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1390,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
-	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -1620,10 +1621,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			}
 		}
 
-		pci_dma_sync_single_for_device(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
-					       pad + RX_COPY_THRESH,
-					       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+					   pad + RX_COPY_THRESH,
+					   DMA_FROM_DEVICE);
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
 
@@ -1665,10 +1666,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 		} else
 		if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-			pci_unmap_single(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size,
+					 DMA_FROM_DEVICE);
 			skb_reserve(skb, pad);
 			skb_put(skb, len);
 
@@ -4940,9 +4941,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		}
 
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
@@ -4978,7 +4979,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 					fp->disable_tpa = 1;
 					break;
 				}
-				pci_unmap_addr_set((struct sw_rx_bd *)
+				dma_unmap_addr_set((struct sw_rx_bd *)
 							&bp->fp->tpa_pool[i],
 						   mapping, 0);
 				fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5658,8 +5659,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 
 static int bnx2x_gunzip_init(struct bnx2x *bp)
 {
-	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
-					      &bp->gunzip_mapping);
+	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+					    &bp->gunzip_mapping, GFP_KERNEL);
 	if (bp->gunzip_buf == NULL)
 		goto gunzip_nomem1;
 
@@ -5679,8 +5680,8 @@ gunzip_nomem3:
 	bp->strm = NULL;
 
 gunzip_nomem2:
-	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-			    bp->gunzip_mapping);
+	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+			  bp->gunzip_mapping);
 	bp->gunzip_buf = NULL;
 
 gunzip_nomem1:
@@ -5696,8 +5697,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
 	bp->strm = NULL;
 
 	if (bp->gunzip_buf) {
-		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-				    bp->gunzip_mapping);
+		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+				  bp->gunzip_mapping);
 		bp->gunzip_buf = NULL;
 	}
 }
@@ -6692,7 +6693,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 #define BNX2X_PCI_FREE(x, y, size) \
 	do { \
 		if (x) { \
-			pci_free_consistent(bp->pdev, size, x, y); \
+			dma_free_coherent(&bp->pdev->dev, size, x, y); \
 			x = NULL; \
 			y = 0; \
 		} \
@@ -6773,7 +6774,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 
 #define BNX2X_PCI_ALLOC(x, y, size) \
 	do { \
-		x = pci_alloc_consistent(bp->pdev, size, y); \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 		if (x == NULL) \
 			goto alloc_mem_err; \
 		memset(x, 0, size); \
@@ -6906,9 +6907,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 			if (skb == NULL)
 				continue;
 
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -10269,8 +10270,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
 	bd_prod = TX_BD(fp_tx->tx_bd_prod);
 	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
-	mapping = pci_map_single(bp->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -11316,8 +11317,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	mapping = pci_map_single(bp->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11374,8 +11375,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (total_pkt_bd == NULL)
 			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
 
-		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
-				       frag->size, PCI_DMA_TODEVICE);
+		mapping = dma_map_page(&bp->pdev->dev, frag->page,
+				       frag->page_offset,
+				       frag->size, DMA_TO_DEVICE);
 
 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11832,15 +11834,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 		goto err_out_release;
 	}
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
 		bp->flags |= USING_DAC_FLAG;
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+			pr_err("dma_set_coherent_mask failed, aborting\n");
 			rc = -EIO;
 			goto err_out_release;
 		}
 
-	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
 		pr_err("System does not support DMA, aborting\n");
 		rc = -EIO;
 		goto err_out_release;