Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--	drivers/net/bnx2x_main.c	148
1 file changed, 76 insertions(+), 72 deletions(-)
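This patch converts bnx2x from the deprecated pci_* DMA wrappers to the generic DMA API (dma_map_single()/dma_map_page(), dma_alloc_coherent(), the dma_unmap_addr state macros, dma_set_mask()), moves the receive path from netif_receive_skb()/vlan_hwaccel_receive_skb() to GRO, replaces dev_mc_list with netdev_hw_addr in the multicast code, and bumps the driver version to 1.52.1-8. Short sketches of each pattern follow the relevant hunks below.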
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a72d6c..63a17d604a9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
 #include "bnx2x_init_ops.h"
 #include "bnx2x_dump.h"
 
-#define DRV_MODULE_VERSION	"1.52.1-7"
-#define DRV_MODULE_RELDATE	"2010/02/28"
+#define DRV_MODULE_VERSION	"1.52.1-8"
+#define DRV_MODULE_RELDATE	"2010/04/01"
 #define BNX2X_BC_VER		0x040200
 
 #include <linux/firmware.h>
@@ -842,7 +842,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* unmap first bd */
 	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
 	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +872,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
 		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
-			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 		if (--nbd)
 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
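Every unmap hunk in this patch follows the same mechanical substitution: the pci_dev argument becomes its embedded struct device, and the PCI_DMA_* direction constants become the generic DMA_* ones. A minimal sketch of the before/after shape (the function name is hypothetical, not a bnx2x symbol):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static void example_unmap_tx_bd(struct pci_dev *pdev, dma_addr_t addr,
					size_t len)
	{
		/* was: pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE); */
		dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
	}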
@@ -1086,7 +1086,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
@@ -1115,15 +1115,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
-			       PCI_DMA_FROMDEVICE);
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
 		return -ENOMEM;
 	}
 
 	sw_buf->page = page;
-	pci_unmap_addr_set(sw_buf, mapping, mapping);
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
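The RX allocation paths keep the map-then-check idiom: dma_map_page() (or dma_map_single()) can fail, and the result must be tested with dma_mapping_error() before the address is written into a hardware descriptor. A sketch under the assumption of a caller-supplied struct device; example_map_rx_page() is hypothetical:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/mm.h>

	static int example_map_rx_page(struct device *dev, struct page *page,
				       size_t size, dma_addr_t *mapping)
	{
		*mapping = dma_map_page(dev, page, 0, size, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, *mapping)))
			return -ENOMEM;	/* caller frees the page, as above */
		return 0;
	}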
@@ -1143,15 +1143,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
-				 PCI_DMA_FROMDEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
 
 	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1173,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 
-	pci_dma_sync_single_for_device(bp->pdev,
-				       pci_unmap_addr(cons_rx_buf, mapping),
-				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(cons_rx_buf, mapping),
+				   RX_COPY_THRESH, DMA_FROM_DEVICE);
 
 	prod_rx_buf->skb = cons_rx_buf->skb;
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			   pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
 	*prod_bd = *cons_bd;
 }
 
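pci_unmap_addr()/pci_unmap_addr_set() and their dma_unmap_addr* replacements are the DMA state API: they stash the mapped address in the software ring entry so it can be synced or unmapped later, and compile away entirely on configurations that do not need it. A sketch with a hypothetical ring entry (not bnx2x's sw_rx_bd):

	#include <linux/dma-mapping.h>

	struct example_sw_buf {
		struct page *page;
		DEFINE_DMA_UNMAP_ADDR(mapping);
	};

	static void example_recycle_bd(struct device *dev,
				       struct example_sw_buf *cons,
				       struct example_sw_buf *prod,
				       size_t hdr_len)
	{
		/* hand the CPU-touched header back to the device ... */
		dma_sync_single_for_device(dev, dma_unmap_addr(cons, mapping),
					   hdr_len, DMA_FROM_DEVICE);
		/* ... and carry the stored mapping from cons to prod */
		dma_unmap_addr_set(prod, mapping, dma_unmap_addr(cons, mapping));
	}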
@@ -1283,9 +1283,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
-	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+				 bp->rx_buf_size, DMA_FROM_DEVICE);
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
 	fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1361,8 +1361,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 
 		/* Unmap the page as we r going to pass it to the stack */
-		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1390,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
-	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -1441,12 +1442,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 #ifdef BCM_VLAN
 		if ((bp->vlgrp != NULL) && is_vlan_cqe &&
 		    (!is_not_hwaccel_vlan_cqe))
-			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-						 le16_to_cpu(cqe->fast_path_cqe.
-							     vlan_tag));
+			vlan_gro_receive(&fp->napi, bp->vlgrp,
+					 le16_to_cpu(cqe->fast_path_cqe.
+						     vlan_tag), skb);
 		else
 #endif
-			netif_receive_skb(skb);
+			napi_gro_receive(&fp->napi, skb);
 	} else {
 		DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
 		   " - dropping packet!\n");
@@ -1620,10 +1621,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			}
 		}
 
-		pci_dma_sync_single_for_device(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
-					pad + RX_COPY_THRESH,
-					PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+					pad + RX_COPY_THRESH,
+					DMA_FROM_DEVICE);
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
@@ -1665,10 +1666,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 		} else
 		if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size,
+					 DMA_FROM_DEVICE);
 			skb_reserve(skb, pad);
 			skb_put(skb, len);
@@ -1699,11 +1700,11 @@ reuse_rx:
 			if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
 			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
 			     PARSING_FLAGS_VLAN))
-				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-					le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+				vlan_gro_receive(&fp->napi, bp->vlgrp,
+					le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
 			else
 #endif
-				netif_receive_skb(skb);
+				napi_gro_receive(&fp->napi, skb);
 
 
 next_rx:
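In both receive paths, delivery to the stack switches from vlan_hwaccel_receive_skb()/netif_receive_skb() to the GRO entry points, which take the fastpath's napi_struct so receives can be aggregated per queue. A sketch, assuming a hypothetical per-queue struct that embeds the napi context:

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>

	struct example_rx_queue {
		struct napi_struct napi;
	};

	static void example_rx_deliver(struct example_rx_queue *q,
				       struct vlan_group *vlgrp,
				       struct sk_buff *skb,
				       u16 vlan_tag, bool hw_vlan)
	{
		if (vlgrp && hw_vlan)
			vlan_gro_receive(&q->napi, vlgrp, vlan_tag, skb);
		else
			napi_gro_receive(&q->napi, skb);
	}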
@@ -4940,9 +4941,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		}
 
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
@@ -4978,7 +4979,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 					fp->disable_tpa = 1;
 					break;
 				}
-				pci_unmap_addr_set((struct sw_rx_bd *)
+				dma_unmap_addr_set((struct sw_rx_bd *)
 							&bp->fp->tpa_pool[i],
 						   mapping, 0);
 				fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5658,8 +5659,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 
 static int bnx2x_gunzip_init(struct bnx2x *bp)
 {
-	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
-					      &bp->gunzip_mapping);
+	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+					    &bp->gunzip_mapping, GFP_KERNEL);
 	if (bp->gunzip_buf == NULL)
 		goto gunzip_nomem1;
 
@@ -5679,8 +5680,8 @@ gunzip_nomem3:
 	bp->strm = NULL;
 
 gunzip_nomem2:
-	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-			    bp->gunzip_mapping);
+	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+			  bp->gunzip_mapping);
 	bp->gunzip_buf = NULL;
 
 gunzip_nomem1:
@@ -5696,8 +5697,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
 	bp->strm = NULL;
 
 	if (bp->gunzip_buf) {
-		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-				    bp->gunzip_mapping);
+		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+				  bp->gunzip_mapping);
 		bp->gunzip_buf = NULL;
 	}
 }
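pci_alloc_consistent() hard-codes GFP_ATOMIC; dma_alloc_coherent() takes an explicit gfp_t, which is why the converted call can pass GFP_KERNEL from this sleepable init path. A sketch of the coherent-buffer lifecycle (function names hypothetical):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static void *example_alloc_dma_buf(struct device *dev, size_t size,
					   dma_addr_t *mapping)
	{
		return dma_alloc_coherent(dev, size, mapping, GFP_KERNEL);
	}

	static void example_free_dma_buf(struct device *dev, size_t size,
					 void *buf, dma_addr_t mapping)
	{
		dma_free_coherent(dev, size, buf, mapping);
	}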
@@ -6692,7 +6693,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 #define BNX2X_PCI_FREE(x, y, size) \
 	do { \
 		if (x) { \
-			pci_free_consistent(bp->pdev, size, x, y); \
+			dma_free_coherent(&bp->pdev->dev, size, x, y); \
 			x = NULL; \
 			y = 0; \
 		} \
@@ -6773,7 +6774,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 
 #define BNX2X_PCI_ALLOC(x, y, size) \
 	do { \
-		x = pci_alloc_consistent(bp->pdev, size, y); \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 		if (x == NULL) \
 			goto alloc_mem_err; \
 		memset(x, 0, size); \
@@ -6906,9 +6907,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 			if (skb == NULL)
 				continue;
 
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -8935,6 +8936,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
 	bp->multi_mode = multi_mode;
 
 
+	bp->dev->features |= NETIF_F_GRO;
+
 	/* Set TPA flags */
 	if (disable_tpa) {
 		bp->flags &= ~TPA_ENABLE_FLAG;
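Setting NETIF_F_GRO at init advertises GRO on the device, after which the stack's ethtool hooks can toggle it at runtime without driver involvement. As a sketch, the flag is simply OR'd into the feature mask during setup:

	#include <linux/netdevice.h>

	static void example_setup_features(struct net_device *dev)
	{
		dev->features |= NETIF_F_GRO;
	}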
@@ -10267,8 +10270,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 
 	bd_prod = TX_BD(fp_tx->tx_bd_prod);
 	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
-	mapping = pci_map_single(bp->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -11314,8 +11317,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	mapping = pci_map_single(bp->pdev, skb->data,
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
 
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +11375,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (total_pkt_bd == NULL)
 			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
 
-		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
-				       frag->size, PCI_DMA_TODEVICE);
+		mapping = dma_map_page(&bp->pdev->dev, frag->page,
+				       frag->page_offset,
+				       frag->size, DMA_TO_DEVICE);
 
 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11494,21 +11498,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 	else { /* some multicasts */
 		if (CHIP_IS_E1(bp)) {
 			int i, old, offset;
-			struct dev_mc_list *mclist;
+			struct netdev_hw_addr *ha;
 			struct mac_configuration_cmd *config =
 						bnx2x_sp(bp, mcast_config);
 
 			i = 0;
-			netdev_for_each_mc_addr(mclist, dev) {
+			netdev_for_each_mc_addr(ha, dev) {
 				config->config_table[i].
 					cam_entry.msb_mac_addr =
-					swab16(*(u16 *)&mclist->dmi_addr[0]);
+					swab16(*(u16 *)&ha->addr[0]);
 				config->config_table[i].
 					cam_entry.middle_mac_addr =
-					swab16(*(u16 *)&mclist->dmi_addr[2]);
+					swab16(*(u16 *)&ha->addr[2]);
 				config->config_table[i].
 					cam_entry.lsb_mac_addr =
-					swab16(*(u16 *)&mclist->dmi_addr[4]);
+					swab16(*(u16 *)&ha->addr[4]);
 				config->config_table[i].cam_entry.flags =
 							cpu_to_le16(port);
 				config->config_table[i].
@@ -11562,18 +11566,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
 						      0);
 		} else { /* E1H */
 			/* Accept one or more multicasts */
-			struct dev_mc_list *mclist;
+			struct netdev_hw_addr *ha;
 			u32 mc_filter[MC_HASH_SIZE];
 			u32 crc, bit, regidx;
 			int i;
 
 			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
 
-			netdev_for_each_mc_addr(mclist, dev) {
+			netdev_for_each_mc_addr(ha, dev) {
 				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-				   mclist->dmi_addr);
+				   ha->addr);
 
-				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
+				crc = crc32c_le(0, ha->addr, ETH_ALEN);
 				bit = (crc >> 24) & 0xff;
 				regidx = bit >> 5;
 				bit &= 0x1f;
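The old dev_mc_list/dmi_addr pair is gone; multicast addresses are now plain netdev_hw_addr entries walked with netdev_for_each_mc_addr(). A sketch of the E1H-style hash-filter build above, with a hypothetical filter size standing in for MC_HASH_SIZE:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>
	#include <linux/crc32c.h>
	#include <linux/string.h>

	#define EXAMPLE_MC_HASH_SIZE 8	/* assumed; mirrors MC_HASH_SIZE */

	static void example_build_mc_filter(struct net_device *dev,
					    u32 mc_filter[EXAMPLE_MC_HASH_SIZE])
	{
		struct netdev_hw_addr *ha;
		u32 crc, bit, regidx;

		memset(mc_filter, 0, sizeof(u32) * EXAMPLE_MC_HASH_SIZE);
		netdev_for_each_mc_addr(ha, dev) {
			crc = crc32c_le(0, ha->addr, ETH_ALEN);
			bit = (crc >> 24) & 0xff;
			regidx = bit >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}
	}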
@@ -11830,15 +11834,15 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 		goto err_out_release;
 	}
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
 		bp->flags |= USING_DAC_FLAG;
-		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
-			pr_err("pci_set_consistent_dma_mask failed, aborting\n");
+		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+			pr_err("dma_set_coherent_mask failed, aborting\n");
 			rc = -EIO;
 			goto err_out_release;
 		}
 
-	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
 		pr_err("System does not support DMA, aborting\n");
 		rc = -EIO;
 		goto err_out_release;
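The probe-time mask negotiation keeps its shape: try a 64-bit streaming mask, and if that sticks, the coherent mask must also be 64-bit or the device is unusable; otherwise fall back to 32-bit. A sketch of the same logic (example_set_dma_masks() and the using_dac flag are hypothetical):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int example_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
	{
		*using_dac = false;
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
			*using_dac = true;
			/* streaming mask is 64-bit; coherent must follow */
			if (dma_set_coherent_mask(&pdev->dev,
						  DMA_BIT_MASK(64)) != 0)
				return -EIO;
		} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
			return -EIO;	/* no usable DMA mask at all */
		}
		return 0;
	}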