Diffstat (limited to 'drivers')
-rw-r--r--  drivers/firewire/fw-iso.c                      |  2
-rw-r--r--  drivers/firewire/fw-ohci.c                     |  2
-rw-r--r--  drivers/firewire/fw-sbp2.c                     |  8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sdma.c       |  2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_sdma.c  |  6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c         |  2
-rw-r--r--  drivers/media/dvb/pluto2/pluto2.c              |  2
-rw-r--r--  drivers/mmc/host/sdhci.c                       |  4
-rw-r--r--  drivers/net/arm/ep93xx_eth.c                   |  4
-rw-r--r--  drivers/net/bnx2x_main.c                       |  4
-rw-r--r--  drivers/net/cxgb3/sge.c                        |  2
-rw-r--r--  drivers/net/e100.c                             |  2
-rw-r--r--  drivers/net/e1000e/ethtool.c                   |  4
-rw-r--r--  drivers/net/e1000e/netdev.c                    | 11
-rw-r--r--  drivers/net/ibmveth.c                          | 38
-rw-r--r--  drivers/net/iseries_veth.c                     |  4
-rw-r--r--  drivers/net/mlx4/eq.c                          |  2
-rw-r--r--  drivers/net/pasemi_mac.c                       |  6
-rw-r--r--  drivers/net/qla3xxx.c                          | 12
-rw-r--r--  drivers/net/s2io.c                             | 48
-rw-r--r--  drivers/net/sfc/rx.c                           |  4
-rw-r--r--  drivers/net/sfc/tx.c                           |  7
-rw-r--r--  drivers/net/spider_net.c                       |  4
-rw-r--r--  drivers/net/tc35815.c                          |  4
-rw-r--r--  drivers/net/wireless/ath5k/base.c              |  4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c                 |  4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c               |  4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c               |  2
-rw-r--r--  drivers/scsi/ibmvscsi/rpa_vscsi.c              |  2
-rw-r--r--  drivers/spi/atmel_spi.c                        |  4
-rw-r--r--  drivers/spi/au1550_spi.c                       |  6
-rw-r--r--  drivers/spi/omap2_mcspi.c                      |  4
-rw-r--r--  drivers/spi/pxa2xx_spi.c                       |  4
-rw-r--r--  drivers/spi/spi_imx.c                          |  6
34 files changed, 117 insertions(+), 107 deletions(-)
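
Every hunk below follows the same pattern: the struct device (or struct pci_dev) that performed the mapping is now passed as the first argument of dma_mapping_error() / pci_dma_mapping_error(). The following is an illustrative sketch of the converted calling convention only; the helper name, device pointer and buffer are made up for the example and are not taken from any driver in this series:

        /*
         * Illustrative sketch -- not code from this series.  The device that
         * created the mapping is also handed to the error check, so the DMA
         * core can consult the per-device dma_ops.
         */
        #include <linux/dma-mapping.h>

        static int example_map_and_check(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t addr;

                addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, addr))       /* was: dma_mapping_error(addr) */
                        return -ENOMEM;

                /* ... hand addr to the hardware ... */

                dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
                return 0;
        }

PCI drivers use the equivalent wrapper with the pci_dev, e.g. pci_dma_mapping_error(pdev, addr).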
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index bcbe794a3ea5..e14c03dc0065 100644
--- a/drivers/firewire/fw-iso.c
+++ b/drivers/firewire/fw-iso.c
@@ -50,7 +50,7 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
 
                address = dma_map_page(card->device, buffer->pages[i],
                                        0, PAGE_SIZE, direction);
-               if (dma_mapping_error(address)) {
+               if (dma_mapping_error(card->device, address)) {
                        __free_page(buffer->pages[i]);
                        goto out_pages;
                }
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 333b12544dd1..566672e0bcff 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -953,7 +953,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
                payload_bus =
                        dma_map_single(ohci->card.device, packet->payload,
                                       packet->payload_length, DMA_TO_DEVICE);
-               if (dma_mapping_error(payload_bus)) {
+               if (dma_mapping_error(ohci->card.device, payload_bus)) {
                        packet->ack = RCODE_SEND_ERROR;
                        return -1;
                }
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 53fc5a641e6d..aaff50ebba1d 100644
--- a/drivers/firewire/fw-sbp2.c
+++ b/drivers/firewire/fw-sbp2.c
@@ -543,7 +543,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
        orb->response_bus =
                dma_map_single(device->card->device, &orb->response,
                               sizeof(orb->response), DMA_FROM_DEVICE);
-       if (dma_mapping_error(orb->response_bus))
+       if (dma_mapping_error(device->card->device, orb->response_bus))
                goto fail_mapping_response;
 
        orb->request.response.high = 0;
@@ -577,7 +577,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
        orb->base.request_bus =
                dma_map_single(device->card->device, &orb->request,
                               sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->base.request_bus))
+       if (dma_mapping_error(device->card->device, orb->base.request_bus))
                goto fail_mapping_request;
 
        sbp2_send_orb(&orb->base, lu, node_id, generation,
@@ -1424,7 +1424,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
        orb->page_table_bus =
                dma_map_single(device->card->device, orb->page_table,
                               sizeof(orb->page_table), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->page_table_bus))
+       if (dma_mapping_error(device->card->device, orb->page_table_bus))
                goto fail_page_table;
 
        /*
@@ -1509,7 +1509,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
        orb->base.request_bus =
                dma_map_single(device->card->device, &orb->request,
                               sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->base.request_bus))
+       if (dma_mapping_error(device->card->device, orb->base.request_bus))
                goto out;
 
        sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index eaba03273e4f..284c9bca517e 100644
--- a/drivers/infiniband/hw/ipath/ipath_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_sdma.c
@@ -698,7 +698,7 @@ retry:
 
        addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
                              tx->map_len, DMA_TO_DEVICE);
-       if (dma_mapping_error(addr)) {
+       if (dma_mapping_error(&dd->pcidev->dev, addr)) {
                ret = -EIO;
                goto unlock;
        }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 86e016916cd1..82d9a0b5ca2f 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_sdma.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
@@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
 
        dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                DMA_TO_DEVICE);
-       if (dma_mapping_error(dma_addr)) {
+       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                ret = -ENOMEM;
                goto free_unmap;
        }
@@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
                        pages[j], 0, flen, DMA_TO_DEVICE);
                unsigned long fofs = addr & ~PAGE_MASK;
 
-               if (dma_mapping_error(dma_addr)) {
+               if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                        ret = -ENOMEM;
                        goto done;
                }
@@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
                if (page) {
                        dma_addr = dma_map_page(&dd->pcidev->dev,
                                                page, 0, len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(dma_addr)) {
+                       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 4e36aa7cb3d2..cc6858f0b65b 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
                return -ENOMEM;
        dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+       if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
                __free_page(dev->eq_table.icm_page);
                return -ENOMEM;
        }
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 1360403b88b6..a9653c63f4db 100644
--- a/drivers/media/dvb/pluto2/pluto2.c
+++ b/drivers/media/dvb/pluto2/pluto2.c
@@ -242,7 +242,7 @@ static int __devinit pluto_dma_map(struct pluto *pluto)
        pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
                        TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
 
-       return pci_dma_mapping_error(pluto->dma_addr);
+       return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
 }
 
 static void pluto_dma_unmap(struct pluto *pluto)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c3a5db72ddd7..5f95e10229b5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -337,7 +337,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
        host->align_addr = dma_map_single(mmc_dev(host->mmc),
                host->align_buffer, 128 * 4, direction);
-       if (dma_mapping_error(host->align_addr))
+       if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
                goto fail;
        BUG_ON(host->align_addr & 0x3);
 
@@ -439,7 +439,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
        host->adma_addr = dma_map_single(mmc_dev(host->mmc),
                host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
-       if (dma_mapping_error(host->align_addr))
+       if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
                goto unmap_entries;
        BUG_ON(host->adma_addr & 0x3);
 
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7a14980f3472..18d3eeb7eab2 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
                        goto err;
 
                d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(d)) {
+               if (dma_mapping_error(NULL, d)) {
                        free_page((unsigned long)page);
                        goto err;
                }
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
                        goto err;
 
                d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-               if (dma_mapping_error(d)) {
+               if (dma_mapping_error(NULL, d)) {
                        free_page((unsigned long)page);
                        goto err;
                }
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0263bef9cc6d..c7cc760a1777 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 
        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
-       if (unlikely(dma_mapping_error(mapping))) {
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
-       if (unlikely(dma_mapping_error(mapping))) {
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a96331c875e6..1b0861d73ab7 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
        dma_addr_t mapping;
 
        mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
-       if (unlikely(pci_dma_mapping_error(mapping)))
+       if (unlikely(pci_dma_mapping_error(pdev, mapping)))
                return -ENOMEM;
 
        pci_unmap_addr_set(sd, dma_addr, mapping);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1037b1332312..19d32a227be1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
        rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
                RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
-       if (pci_dma_mapping_error(rx->dma_addr)) {
+       if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
                dev_kfree_skb_any(rx->skb);
                rx->skb = NULL;
                rx->dma_addr = 0;
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a14561f40db0..9350564065e7 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                tx_ring->buffer_info[i].dma =
                        pci_map_single(pdev, skb->data, skb->len,
                                       PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
+               if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
                        ret_val = 4;
                        goto err_nomem;
                }
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                rx_ring->buffer_info[i].dma =
                        pci_map_single(pdev, skb->data, 2048,
                                       PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
+               if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
                        ret_val = 8;
                        goto err_nomem;
                }
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9c0f56b3c518..d13677899767 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -195,7 +195,7 @@ map_skb:
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_buffer_len,
                                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        break;
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                                   ps_page->page,
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(ps_page->dma)) {
+                               if (pci_dma_mapping_error(pdev, ps_page->dma)) {
                                        dev_err(&adapter->pdev->dev,
                                                "RX DMA page map failed\n");
                                        adapter->rx_dma_failed++;
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_ps_bsize0,
                                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        /* cleanup skb */
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                                                  skb->data + offset,
                                                  size,
                                                  PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
                        dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
                        adapter->tx_dma_failed++;
                        return -1;
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                                                        offset,
                                                        size,
                                                        PCI_DMA_TODEVICE);
-                       if (pci_dma_mapping_error(buffer_info->dma)) {
+                       if (pci_dma_mapping_error(adapter->pdev,
+                                                 buffer_info->dma)) {
                                dev_err(&adapter->pdev->dev,
                                        "TX DMA page map failed\n");
                                adapter->tx_dma_failed++;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index e5a6e2e84540..91ec9fdc7184 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                pool->buff_size, DMA_FROM_DEVICE);
 
-               if (dma_mapping_error(dma_addr))
+               if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto failure;
 
                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
@@ -294,7 +294,7 @@ failure:
                pool->consumer_index = pool->size - 1;
        else
                pool->consumer_index--;
-       if (!dma_mapping_error(dma_addr))
+       if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
                dma_unmap_single(&adapter->vdev->dev,
                                pool->dma_addr[index], pool->buff_size,
                                DMA_FROM_DEVICE);
@@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
        int i;
+       struct device *dev = &adapter->vdev->dev;
 
        if(adapter->buffer_list_addr != NULL) {
-               if(!dma_mapping_error(adapter->buffer_list_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       adapter->buffer_list_dma, 4096,
+               if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+                       dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->buffer_list_dma = DMA_ERROR_CODE;
                }
@@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
        }
 
        if(adapter->filter_list_addr != NULL) {
-               if(!dma_mapping_error(adapter->filter_list_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       adapter->filter_list_dma, 4096,
+               if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+                       dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->filter_list_dma = DMA_ERROR_CODE;
                }
@@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
        }
 
        if(adapter->rx_queue.queue_addr != NULL) {
-               if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
+               if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+                       dma_unmap_single(dev,
                                        adapter->rx_queue.queue_dma,
                                        adapter->rx_queue.queue_len,
                                        DMA_BIDIRECTIONAL);
@@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
        int rc;
        union ibmveth_buf_desc rxq_desc;
        int i;
+       struct device *dev;
 
        ibmveth_debug_printk("open starting\n");
 
@@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
                return -ENOMEM;
        }
 
-       adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+       dev = &adapter->vdev->dev;
+
+       adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+       adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+       adapter->rx_queue.queue_dma = dma_map_single(dev,
                        adapter->rx_queue.queue_addr,
                        adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
-       if((dma_mapping_error(adapter->buffer_list_dma) ) ||
-           (dma_mapping_error(adapter->filter_list_dma)) ||
-           (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+       if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+           (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+           (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
                ibmveth_error_printk("unable to map filter or buffer list pages\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
@@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev)
        adapter->bounce_buffer_dma =
            dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
                           netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+       if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                ibmveth_error_printk("unable to map bounce buffer\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
@@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                buf[1] = 0;
        }
 
-       if (dma_mapping_error(data_dma_addr)) {
+       if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        ibmveth_error_printk("tx: unable to map xmit buffer\n");
                skb_copy_from_linear_data(skb, adapter->bounce_buffer,
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index b8d0639c1cdf..c46864d626b2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
        msg->data.addr[0] = dma_map_single(port->dev, skb->data,
                                skb->len, DMA_TO_DEVICE);
 
-       if (dma_mapping_error(msg->data.addr[0]))
+       if (dma_mapping_error(port->dev, msg->data.addr[0]))
                goto recycle_and_drop;
 
        msg->dev = port->dev;
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
                dma_address = msg->data.addr[0];
                dma_length = msg->data.len[0];
 
-               if (!dma_mapping_error(dma_address))
+               if (!dma_mapping_error(msg->dev, dma_address))
                        dma_unmap_single(msg->dev, dma_address, dma_length,
                                        DMA_TO_DEVICE);
 
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index ea3a09aaa844..7df928d3a3d8 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
                return -ENOMEM;
        priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+       if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
                __free_page(priv->eq_table.icm_page);
                return -ENOMEM;
        }
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 993d87c9296f..edc0fd588985 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
                                     mac->bufsz - LOCAL_SKB_ALIGN,
                                     PCI_DMA_FROMDEVICE);
 
-               if (unlikely(dma_mapping_error(dma))) {
+               if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
                        dev_kfree_skb_irq(info->skb);
                        break;
                }
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
        map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                                PCI_DMA_TODEVICE);
        map_size[0] = skb_headlen(skb);
-       if (dma_mapping_error(map[0]))
+       if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
                goto out_err_nolock;
 
        for (i = 0; i < nfrags; i++) {
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
                                        frag->page_offset, frag->size,
                                        PCI_DMA_TODEVICE);
                map_size[i+1] = frag->size;
-               if (dma_mapping_error(map[i+1])) {
+               if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
                        nfrags = i;
                        goto out_err_nolock;
                }
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e7d48a352beb..e82b37bbd6c3 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                             qdev->lrg_buffer_len -
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
-                       err = pci_dma_mapping_error(map);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
                        if(err) {
                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                       qdev->ndev->name, err);
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
 
-                               err = pci_dma_mapping_error(map);
+                               err = pci_dma_mapping_error(qdev->pdev, map);
                                if(err) {
                                        printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                               qdev->ndev->name, err);
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
         */
        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
-       err = pci_dma_mapping_error(map);
+       err = pci_dma_mapping_error(qdev->pdev, map);
        if(err) {
                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                       qdev->ndev->name, err);
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
                                     sizeof(struct oal),
                                     PCI_DMA_TODEVICE);
 
-                       err = pci_dma_mapping_error(map);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
                        if(err) {
 
                                printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
                                   frag->page_offset, frag->size,
                                   PCI_DMA_TODEVICE);
 
-               err = pci_dma_mapping_error(map);
+               err = pci_dma_mapping_error(qdev->pdev, map);
                if(err) {
                        printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
                               qdev->ndev->name, err);
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
 
-               err = pci_dma_mapping_error(map);
+               err = pci_dma_mapping_error(qdev->pdev, map);
                if(err) {
                        printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                               qdev->ndev->name, err);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 9dae40ccf048..86d77d05190a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic)
  *  Return Value:
  *  SUCCESS on success or an appropriate -ve value on failure.
  */
-
-static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+                          int from_card_up)
 {
        struct sk_buff *skb;
        struct RxD_t *rxdp;
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
                        rxdp1->Buffer0_ptr = pci_map_single
                            (ring->pdev, skb->data, size - NET_IP_ALIGN,
                                PCI_DMA_FROMDEVICE);
-                       if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+                       if (pci_dma_mapping_error(nic->pdev,
+                                                 rxdp1->Buffer0_ptr))
                                goto pci_map_failed;
 
                        rxdp->Control_2 =
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
                                rxdp3->Buffer0_ptr =
                                   pci_map_single(ring->pdev, ba->ba_0,
                                        BUF0_LEN, PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
+                               if (pci_dma_mapping_error(nic->pdev,
+                                                         rxdp3->Buffer0_ptr))
                                        goto pci_map_failed;
                        } else
                                pci_dma_sync_single_for_device(ring->pdev,
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
                                (ring->pdev, skb->data, ring->mtu + 4,
                                PCI_DMA_FROMDEVICE);
 
-                       if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+                       if (pci_dma_mapping_error(nic->pdev,
+                                                 rxdp3->Buffer2_ptr))
                                goto pci_map_failed;
 
                        if (from_card_up) {
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
                                                ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
 
-                               if (pci_dma_mapping_error
-                                       (rxdp3->Buffer1_ptr)) {
+                               if (pci_dma_mapping_error(nic->pdev,
+                                                       rxdp3->Buffer1_ptr)) {
                                        pci_unmap_single
                                                (ring->pdev,
                                                (dma_addr_t)(unsigned long)
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp)
        }
 }
 
-static int s2io_chk_rx_buffers(struct ring_info *ring)
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
 {
-       if (fill_rx_buffers(ring, 0) == -ENOMEM) {
+       if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
                DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
                DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
        }
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
                return 0;
 
        pkts_processed = rx_intr_handler(ring, budget);
-       s2io_chk_rx_buffers(ring);
+       s2io_chk_rx_buffers(nic, ring);
 
        if (pkts_processed < budget_org) {
                netif_rx_complete(dev, napi);
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
        for (i = 0; i < config->rx_ring_num; i++) {
                ring = &mac_control->rings[i];
                ring_pkts_processed = rx_intr_handler(ring, budget);
-               s2io_chk_rx_buffers(ring);
+               s2io_chk_rx_buffers(nic, ring);
                pkts_processed += ring_pkts_processed;
                budget -= ring_pkts_processed;
                if (budget <= 0)
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev)
                rx_intr_handler(&mac_control->rings[i], 0);
 
        for (i = 0; i < config->rx_ring_num; i++) {
-               if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
+               if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+                   -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
                        break;
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                        fifo->ufo_in_band_v,
                                        sizeof(u64), PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+               if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                        goto pci_map_failed;
                txdp++;
        }
 
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+       if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                goto pci_map_failed;
 
        txdp->Host_Control = (unsigned long) skb;
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
                netif_rx_schedule(dev, &ring->napi);
        } else {
                rx_intr_handler(ring, 0);
-               s2io_chk_rx_buffers(ring);
+               s2io_chk_rx_buffers(sp, ring);
        }
 
        return IRQ_HANDLED;
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
-                               s2io_chk_rx_buffers(&mac_control->rings[i]);
+                               s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
                }
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                                pci_map_single( sp->pdev, (*skb)->data,
                                        size - NET_IP_ALIGN,
                                        PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+                       if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
                                goto memalloc_failed;
                        rxdp->Host_Control = (unsigned long) (*skb);
                }
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                                pci_map_single(sp->pdev, (*skb)->data,
                                        dev->mtu + 4,
                                        PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+                       if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
                                goto memalloc_failed;
                        rxdp3->Buffer0_ptr = *temp0 =
                                pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
                                                PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
+                       if (pci_dma_mapping_error(sp->pdev,
+                                                 rxdp3->Buffer0_ptr)) {
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer2_ptr,
                                        dev->mtu + 4, PCI_DMA_FROMDEVICE);
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                        rxdp3->Buffer1_ptr = *temp1 =
                                pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
+                       if (pci_dma_mapping_error(sp->pdev,
+                                                 rxdp3->Buffer1_ptr)) {
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer0_ptr,
                                        BUF0_LEN, PCI_DMA_FROMDEVICE);
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp)
 
        for (i = 0; i < config->rx_ring_num; i++) {
                mac_control->rings[i].mtu = dev->mtu;
-               ret = fill_rx_buffers(&mac_control->rings[i], 1);
+               ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
                if (ret) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 601b001437c0..0d27dd39bc09 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
                                          rx_buf->data, rx_buf->len,
                                          PCI_DMA_FROMDEVICE);
 
-       if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+       if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
                return -EIO;
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                                        0, efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);
 
-               if (unlikely(pci_dma_mapping_error(dma_addr))) {
+               if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                        return -EIO;
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5cdd082ab8f6..5e8374ab28ee 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 
        /* Process all fragments */
        while (1) {
-               if (unlikely(pci_dma_mapping_error(dma_addr)))
+               if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
                        goto pci_err;
 
                /* Store fields for marking in the per-fragment final
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
        tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
                                        TSOH_BUFFER(tsoh), header_len,
                                        PCI_DMA_TODEVICE);
-       if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+       if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+                                          tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
 
        st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
                                          len, PCI_DMA_TODEVICE);
-       if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+       if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
                st->ifc.unmap_len = len;
                st->ifc.len = len;
                st->ifc.dma_addr = st->ifc.unmap_addr;
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 00aa0b108cb9..b6435d0d71f9 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
        /* iommu-map the skb */
        buf = pci_map_single(card->pdev, descr->skb->data,
                        SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(buf)) {
+       if (pci_dma_mapping_error(card->pdev, buf)) {
                dev_kfree_skb_any(descr->skb);
                descr->skb = NULL;
                if (netif_msg_rx_err(card) && net_ratelimit())
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
        unsigned long flags;
 
        buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(buf)) {
+       if (pci_dma_mapping_error(card->pdev, buf)) {
                if (netif_msg_tx_err(card) && net_ratelimit())
                        dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
                                "Dropping packet\n", skb->data, skb->len);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index a645e5028c14..8487ace9d2e3 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
                return NULL;
        *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(*dma_handle)) {
+       if (pci_dma_mapping_error(hwdev, *dma_handle)) {
                free_page((unsigned long)buf);
                return NULL;
        }
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
                return NULL;
        *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
                                     PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(*dma_handle)) {
+       if (pci_dma_mapping_error(hwdev, *dma_handle)) {
                dev_kfree_skb_any(skb);
                return NULL;
        }
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 217d506527a9..d9769c527346 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
                bf->skb = skb;
                bf->skbaddr = pci_map_single(sc->pdev,
                        skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
-               if (unlikely(pci_dma_mapping_error(bf->skbaddr))) {
+               if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
                        ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
                        dev_kfree_skb(skb);
                        bf->skb = NULL;
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
        ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
                        "skbaddr %llx\n", skb, skb->data, skb->len,
                        (unsigned long long)bf->skbaddr);
-       if (pci_dma_mapping_error(bf->skbaddr)) {
+       if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
                ATH5K_ERR(sc, "beacon DMA mapping failed\n");
                return -EIO;
        }
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c4a7c06793c5..61f8fdea2d96 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -3525,7 +3525,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
        crq->msg_token = dma_map_single(dev, crq->msgs,
                                        PAGE_SIZE, DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(crq->msg_token))
+       if (dma_mapping_error(dev, crq->msg_token))
                goto map_failed;
 
        retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
@@ -3618,7 +3618,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
                                            async_q->size * sizeof(*async_q->msgs),
                                            DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(async_q->msg_token)) {
+       if (dma_mapping_error(dev, async_q->msg_token)) {
                dev_err(dev, "Failed to map async queue\n");
                goto free_async_crq;
        }
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 20000ec79b04..6b24b9cdb04c 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -859,7 +859,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
                                    sizeof(hostdata->madapter_info),
                                    DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(req->buffer)) {
+       if (dma_mapping_error(hostdata->dev, req->buffer)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        dev_err(hostdata->dev,
                                "Unable to map request_buffer for "
@@ -1407,7 +1407,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
                                                    length,
                                                    DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(host_config->buffer)) {
+       if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        dev_err(hostdata->dev,
                                "dma_mapping error getting host config\n");
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 3b9514c8f1f1..2e13ec00172a 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(queue->msg_token))
+       if (dma_mapping_error(target->dev, queue->msg_token))
                goto map_failed;
 
        err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 182146100dc1..462a8574dad9 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(queue->msg_token))
+       if (dma_mapping_error(hostdata->dev, queue->msg_token))
                goto map_failed;
 
        gather_partition_info();
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index e81d59d78910..0c7165660853 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -313,14 +313,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
                xfer->tx_dma = dma_map_single(dev,
                                (void *) xfer->tx_buf, xfer->len,
                                DMA_TO_DEVICE);
-               if (dma_mapping_error(xfer->tx_dma))
+               if (dma_mapping_error(dev, xfer->tx_dma))
                        return -ENOMEM;
        }
        if (xfer->rx_buf) {
                xfer->rx_dma = dma_map_single(dev,
                                xfer->rx_buf, xfer->len,
                                DMA_FROM_DEVICE);
-               if (dma_mapping_error(xfer->rx_dma)) {
+               if (dma_mapping_error(dev, xfer->rx_dma)) {
                        if (xfer->tx_buf)
                                dma_unmap_single(dev,
                                        xfer->tx_dma, xfer->len,
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 9149689c79d9..87b73e0169c5 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -334,7 +334,7 @@ static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
        hw->dma_rx_tmpbuf_size = size;
        hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
                        size, DMA_FROM_DEVICE);
-       if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) {
+       if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
                kfree(hw->dma_rx_tmpbuf);
                hw->dma_rx_tmpbuf = 0;
                hw->dma_rx_tmpbuf_size = 0;
@@ -378,7 +378,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
                        dma_rx_addr = dma_map_single(hw->dev,
                                        (void *)t->rx_buf,
                                        t->len, DMA_FROM_DEVICE);
-                       if (dma_mapping_error(dma_rx_addr))
+                       if (dma_mapping_error(hw->dev, dma_rx_addr))
                                dev_err(hw->dev, "rx dma map error\n");
                }
        } else {
@@ -401,7 +401,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
                        dma_tx_addr = dma_map_single(hw->dev,
                                        (void *)t->tx_buf,
                                        t->len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(dma_tx_addr))
+                       if (dma_mapping_error(hw->dev, dma_tx_addr))
                                dev_err(hw->dev, "tx dma map error\n");
                }
        } else {
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index b1cc148036c1..f6f987bb71ca 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -836,7 +836,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
                if (tx_buf != NULL) {
                        t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
                                        len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(t->tx_dma)) {
+                       if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                                dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                                                'T', len);
                                return -EINVAL;
@@ -845,7 +845,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
                if (rx_buf != NULL) {
                        t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
                                        DMA_FROM_DEVICE);
-                       if (dma_mapping_error(t->rx_dma)) {
+                       if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                                dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                                                'R', len);
                                if (tx_buf != NULL)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 0c452c46ab07..067299d6d192 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -353,7 +353,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
        drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
                                        drv_data->rx_map_len,
                                        DMA_FROM_DEVICE);
-       if (dma_mapping_error(drv_data->rx_dma))
+       if (dma_mapping_error(dev, drv_data->rx_dma))
                return 0;
 
        /* Stream map the tx buffer */
@@ -361,7 +361,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
                                        drv_data->tx_map_len,
                                        DMA_TO_DEVICE);
 
-       if (dma_mapping_error(drv_data->tx_dma)) {
+       if (dma_mapping_error(dev, drv_data->tx_dma)) {
                dma_unmap_single(dev, drv_data->rx_dma,
                                        drv_data->rx_map_len, DMA_FROM_DEVICE);
                return 0;
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 54ac7bea5f8c..6fb77fcc4971 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -491,7 +491,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
                                        buf,
                                        drv_data->tx_map_len,
                                        DMA_TO_DEVICE);
-               if (dma_mapping_error(drv_data->tx_dma))
+               if (dma_mapping_error(dev, drv_data->tx_dma))
                        return -1;
 
                drv_data->tx_dma_needs_unmap = 1;
@@ -516,7 +516,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
                                        buf,
                                        drv_data->len,
                                        DMA_FROM_DEVICE);
-               if (dma_mapping_error(drv_data->rx_dma))
+               if (dma_mapping_error(dev, drv_data->rx_dma))
                        return -1;
                drv_data->rx_dma_needs_unmap = 1;
        }
@@ -534,7 +534,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
                                buf,
                                drv_data->tx_map_len,
                                DMA_TO_DEVICE);
-       if (dma_mapping_error(drv_data->tx_dma)) {
+       if (dma_mapping_error(dev, drv_data->tx_dma)) {
                if (drv_data->rx_dma) {
                        dma_unmap_single(dev,
                                drv_data->rx_dma,