Diffstat (limited to 'drivers/net')
35 files changed, 122 insertions(+), 94 deletions(-)
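Nearly every hunk below applies the same mechanical conversion: dma_mapping_error() and pci_dma_mapping_error() now take the struct device (or struct pci_dev) that performed the mapping as a new first argument, and a few hunks also replace BUG_TRAP() assertions with inverted WARN_ON() checks. As a rough sketch of the new calling convention only -- the function and variable names here are invented and appear nowhere in the patch:

/* Illustrative sketch only: example_map_buf(), dev and buf are made up. */
#include <linux/dma-mapping.h>

static int example_map_buf(struct device *dev, void *buf, size_t len,
			   dma_addr_t *mapping)
{
	dma_addr_t d = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* The error check now needs the same struct device used for the mapping. */
	if (dma_mapping_error(dev, d))
		return -ENOMEM;

	*mapping = d;
	return 0;
}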
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7a14980f3472..18d3eeb7eab2 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) | |||
482 | goto err; | 482 | goto err; |
483 | 483 | ||
484 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); | 484 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); |
485 | if (dma_mapping_error(d)) { | 485 | if (dma_mapping_error(NULL, d)) { |
486 | free_page((unsigned long)page); | 486 | free_page((unsigned long)page); |
487 | goto err; | 487 | goto err; |
488 | } | 488 | } |
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) | |||
505 | goto err; | 505 | goto err; |
506 | 506 | ||
507 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); | 507 | d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); |
508 | if (dma_mapping_error(d)) { | 508 | if (dma_mapping_error(NULL, d)) { |
509 | free_page((unsigned long)page); | 509 | free_page((unsigned long)page); |
510 | goto err; | 510 | goto err; |
511 | } | 511 | } |
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0263bef9cc6d..af251a5df844 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -814,7 +814,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
814 | } | 814 | } |
815 | 815 | ||
816 | /* release skb */ | 816 | /* release skb */ |
817 | BUG_TRAP(skb); | 817 | WARN_ON(!skb); |
818 | dev_kfree_skb(skb); | 818 | dev_kfree_skb(skb); |
819 | tx_buf->first_bd = 0; | 819 | tx_buf->first_bd = 0; |
820 | tx_buf->skb = NULL; | 820 | tx_buf->skb = NULL; |
@@ -837,9 +837,9 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp) | |||
837 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; | 837 | used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; |
838 | 838 | ||
839 | #ifdef BNX2X_STOP_ON_ERROR | 839 | #ifdef BNX2X_STOP_ON_ERROR |
840 | BUG_TRAP(used >= 0); | 840 | WARN_ON(used < 0); |
841 | BUG_TRAP(used <= fp->bp->tx_ring_size); | 841 | WARN_ON(used > fp->bp->tx_ring_size); |
842 | BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL); | 842 | WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL); |
843 | #endif | 843 | #endif |
844 | 844 | ||
845 | return (s16)(fp->bp->tx_ring_size) - used; | 845 | return (s16)(fp->bp->tx_ring_size) - used; |
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp, | |||
1020 | 1020 | ||
1021 | mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, | 1021 | mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE, |
1022 | PCI_DMA_FROMDEVICE); | 1022 | PCI_DMA_FROMDEVICE); |
1023 | if (unlikely(dma_mapping_error(mapping))) { | 1023 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
1024 | __free_pages(page, PAGES_PER_SGE_SHIFT); | 1024 | __free_pages(page, PAGES_PER_SGE_SHIFT); |
1025 | return -ENOMEM; | 1025 | return -ENOMEM; |
1026 | } | 1026 | } |
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, | |||
1048 | 1048 | ||
1049 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, | 1049 | mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size, |
1050 | PCI_DMA_FROMDEVICE); | 1050 | PCI_DMA_FROMDEVICE); |
1051 | if (unlikely(dma_mapping_error(mapping))) { | 1051 | if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { |
1052 | dev_kfree_skb(skb); | 1052 | dev_kfree_skb(skb); |
1053 | return -ENOMEM; | 1053 | return -ENOMEM; |
1054 | } | 1054 | } |
@@ -4374,7 +4374,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
4374 | } | 4374 | } |
4375 | ring_prod = NEXT_RX_IDX(ring_prod); | 4375 | ring_prod = NEXT_RX_IDX(ring_prod); |
4376 | cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); | 4376 | cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); |
4377 | BUG_TRAP(ring_prod > i); | 4377 | WARN_ON(ring_prod <= i); |
4378 | } | 4378 | } |
4379 | 4379 | ||
4380 | fp->rx_bd_prod = ring_prod; | 4380 | fp->rx_bd_prod = ring_prod; |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 83768df27806..f1936d51b458 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -576,6 +576,18 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags) | |||
576 | list_for_each_safe(elem, tmp, &list) { | 576 | list_for_each_safe(elem, tmp, &list) { |
577 | cas_page_t *page = list_entry(elem, cas_page_t, list); | 577 | cas_page_t *page = list_entry(elem, cas_page_t, list); |
578 | 578 | ||
579 | /* | ||
580 | * With the lockless pagecache, the cassini buffering scheme gets | ||
581 | * slightly less accurate: we might find that a page has an | ||
582 | * elevated reference count here, due to a speculative ref, | ||
583 | * and skip it as in-use. Ideally we would be able to reclaim | ||
584 | * it. However this would be such a rare case, it doesn't | ||
585 | * matter too much as we should pick it up the next time round. | ||
586 | * | ||
587 | * Importantly, if we find that the page has a refcount of 1 | ||
588 | * here (our refcount), then we know it is definitely not in use | ||
589 | * so we can reuse it. | ||
590 | */ | ||
579 | if (page_count(page->buffer) > 1) | 591 | if (page_count(page->buffer) > 1) |
580 | continue; | 592 | continue; |
581 | 593 | ||
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a96331c875e6..1b0861d73ab7 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len, | |||
386 | dma_addr_t mapping; | 386 | dma_addr_t mapping; |
387 | 387 | ||
388 | mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); | 388 | mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); |
389 | if (unlikely(pci_dma_mapping_error(mapping))) | 389 | if (unlikely(pci_dma_mapping_error(pdev, mapping))) |
390 | return -ENOMEM; | 390 | return -ENOMEM; |
391 | 391 | ||
392 | pci_unmap_addr_set(sd, dma_addr, mapping); | 392 | pci_unmap_addr_set(sd, dma_addr, mapping); |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1037b1332312..19d32a227be1 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1790 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, | 1790 | rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, |
1791 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); | 1791 | RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); |
1792 | 1792 | ||
1793 | if (pci_dma_mapping_error(rx->dma_addr)) { | 1793 | if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) { |
1794 | dev_kfree_skb_any(rx->skb); | 1794 | dev_kfree_skb_any(rx->skb); |
1795 | rx->skb = NULL; | 1795 | rx->skb = NULL; |
1796 | rx->dma_addr = 0; | 1796 | rx->dma_addr = 0; |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a14561f40db0..9350564065e7 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1090 | tx_ring->buffer_info[i].dma = | 1090 | tx_ring->buffer_info[i].dma = |
1091 | pci_map_single(pdev, skb->data, skb->len, | 1091 | pci_map_single(pdev, skb->data, skb->len, |
1092 | PCI_DMA_TODEVICE); | 1092 | PCI_DMA_TODEVICE); |
1093 | if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) { | 1093 | if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) { |
1094 | ret_val = 4; | 1094 | ret_val = 4; |
1095 | goto err_nomem; | 1095 | goto err_nomem; |
1096 | } | 1096 | } |
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1153 | rx_ring->buffer_info[i].dma = | 1153 | rx_ring->buffer_info[i].dma = |
1154 | pci_map_single(pdev, skb->data, 2048, | 1154 | pci_map_single(pdev, skb->data, 2048, |
1155 | PCI_DMA_FROMDEVICE); | 1155 | PCI_DMA_FROMDEVICE); |
1156 | if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) { | 1156 | if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) { |
1157 | ret_val = 8; | 1157 | ret_val = 8; |
1158 | goto err_nomem; | 1158 | goto err_nomem; |
1159 | } | 1159 | } |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9c0f56b3c518..d13677899767 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -195,7 +195,7 @@ map_skb: | |||
195 | buffer_info->dma = pci_map_single(pdev, skb->data, | 195 | buffer_info->dma = pci_map_single(pdev, skb->data, |
196 | adapter->rx_buffer_len, | 196 | adapter->rx_buffer_len, |
197 | PCI_DMA_FROMDEVICE); | 197 | PCI_DMA_FROMDEVICE); |
198 | if (pci_dma_mapping_error(buffer_info->dma)) { | 198 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { |
199 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 199 | dev_err(&pdev->dev, "RX DMA map failed\n"); |
200 | adapter->rx_dma_failed++; | 200 | adapter->rx_dma_failed++; |
201 | break; | 201 | break; |
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
265 | ps_page->page, | 265 | ps_page->page, |
266 | 0, PAGE_SIZE, | 266 | 0, PAGE_SIZE, |
267 | PCI_DMA_FROMDEVICE); | 267 | PCI_DMA_FROMDEVICE); |
268 | if (pci_dma_mapping_error(ps_page->dma)) { | 268 | if (pci_dma_mapping_error(pdev, ps_page->dma)) { |
269 | dev_err(&adapter->pdev->dev, | 269 | dev_err(&adapter->pdev->dev, |
270 | "RX DMA page map failed\n"); | 270 | "RX DMA page map failed\n"); |
271 | adapter->rx_dma_failed++; | 271 | adapter->rx_dma_failed++; |
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, | |||
300 | buffer_info->dma = pci_map_single(pdev, skb->data, | 300 | buffer_info->dma = pci_map_single(pdev, skb->data, |
301 | adapter->rx_ps_bsize0, | 301 | adapter->rx_ps_bsize0, |
302 | PCI_DMA_FROMDEVICE); | 302 | PCI_DMA_FROMDEVICE); |
303 | if (pci_dma_mapping_error(buffer_info->dma)) { | 303 | if (pci_dma_mapping_error(pdev, buffer_info->dma)) { |
304 | dev_err(&pdev->dev, "RX DMA map failed\n"); | 304 | dev_err(&pdev->dev, "RX DMA map failed\n"); |
305 | adapter->rx_dma_failed++; | 305 | adapter->rx_dma_failed++; |
306 | /* cleanup skb */ | 306 | /* cleanup skb */ |
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3344 | skb->data + offset, | 3344 | skb->data + offset, |
3345 | size, | 3345 | size, |
3346 | PCI_DMA_TODEVICE); | 3346 | PCI_DMA_TODEVICE); |
3347 | if (pci_dma_mapping_error(buffer_info->dma)) { | 3347 | if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) { |
3348 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); | 3348 | dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); |
3349 | adapter->tx_dma_failed++; | 3349 | adapter->tx_dma_failed++; |
3350 | return -1; | 3350 | return -1; |
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter, | |||
3382 | offset, | 3382 | offset, |
3383 | size, | 3383 | size, |
3384 | PCI_DMA_TODEVICE); | 3384 | PCI_DMA_TODEVICE); |
3385 | if (pci_dma_mapping_error(buffer_info->dma)) { | 3385 | if (pci_dma_mapping_error(adapter->pdev, |
3386 | buffer_info->dma)) { | ||
3386 | dev_err(&adapter->pdev->dev, | 3387 | dev_err(&adapter->pdev->dev, |
3387 | "TX DMA page map failed\n"); | 3388 | "TX DMA page map failed\n"); |
3388 | adapter->tx_dma_failed++; | 3389 | adapter->tx_dma_failed++; |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index e5a6e2e84540..91ec9fdc7184 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc | |||
260 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, | 260 | dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, |
261 | pool->buff_size, DMA_FROM_DEVICE); | 261 | pool->buff_size, DMA_FROM_DEVICE); |
262 | 262 | ||
263 | if (dma_mapping_error(dma_addr)) | 263 | if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) |
264 | goto failure; | 264 | goto failure; |
265 | 265 | ||
266 | pool->free_map[free_index] = IBM_VETH_INVALID_MAP; | 266 | pool->free_map[free_index] = IBM_VETH_INVALID_MAP; |
@@ -294,7 +294,7 @@ failure: | |||
294 | pool->consumer_index = pool->size - 1; | 294 | pool->consumer_index = pool->size - 1; |
295 | else | 295 | else |
296 | pool->consumer_index--; | 296 | pool->consumer_index--; |
297 | if (!dma_mapping_error(dma_addr)) | 297 | if (!dma_mapping_error(&adapter->vdev->dev, dma_addr)) |
298 | dma_unmap_single(&adapter->vdev->dev, | 298 | dma_unmap_single(&adapter->vdev->dev, |
299 | pool->dma_addr[index], pool->buff_size, | 299 | pool->dma_addr[index], pool->buff_size, |
300 | DMA_FROM_DEVICE); | 300 | DMA_FROM_DEVICE); |
@@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter) | |||
448 | static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | 448 | static void ibmveth_cleanup(struct ibmveth_adapter *adapter) |
449 | { | 449 | { |
450 | int i; | 450 | int i; |
451 | struct device *dev = &adapter->vdev->dev; | ||
451 | 452 | ||
452 | if(adapter->buffer_list_addr != NULL) { | 453 | if(adapter->buffer_list_addr != NULL) { |
453 | if(!dma_mapping_error(adapter->buffer_list_dma)) { | 454 | if (!dma_mapping_error(dev, adapter->buffer_list_dma)) { |
454 | dma_unmap_single(&adapter->vdev->dev, | 455 | dma_unmap_single(dev, adapter->buffer_list_dma, 4096, |
455 | adapter->buffer_list_dma, 4096, | ||
456 | DMA_BIDIRECTIONAL); | 456 | DMA_BIDIRECTIONAL); |
457 | adapter->buffer_list_dma = DMA_ERROR_CODE; | 457 | adapter->buffer_list_dma = DMA_ERROR_CODE; |
458 | } | 458 | } |
@@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
461 | } | 461 | } |
462 | 462 | ||
463 | if(adapter->filter_list_addr != NULL) { | 463 | if(adapter->filter_list_addr != NULL) { |
464 | if(!dma_mapping_error(adapter->filter_list_dma)) { | 464 | if (!dma_mapping_error(dev, adapter->filter_list_dma)) { |
465 | dma_unmap_single(&adapter->vdev->dev, | 465 | dma_unmap_single(dev, adapter->filter_list_dma, 4096, |
466 | adapter->filter_list_dma, 4096, | ||
467 | DMA_BIDIRECTIONAL); | 466 | DMA_BIDIRECTIONAL); |
468 | adapter->filter_list_dma = DMA_ERROR_CODE; | 467 | adapter->filter_list_dma = DMA_ERROR_CODE; |
469 | } | 468 | } |
@@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) | |||
472 | } | 471 | } |
473 | 472 | ||
474 | if(adapter->rx_queue.queue_addr != NULL) { | 473 | if(adapter->rx_queue.queue_addr != NULL) { |
475 | if(!dma_mapping_error(adapter->rx_queue.queue_dma)) { | 474 | if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { |
476 | dma_unmap_single(&adapter->vdev->dev, | 475 | dma_unmap_single(dev, |
477 | adapter->rx_queue.queue_dma, | 476 | adapter->rx_queue.queue_dma, |
478 | adapter->rx_queue.queue_len, | 477 | adapter->rx_queue.queue_len, |
479 | DMA_BIDIRECTIONAL); | 478 | DMA_BIDIRECTIONAL); |
@@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
535 | int rc; | 534 | int rc; |
536 | union ibmveth_buf_desc rxq_desc; | 535 | union ibmveth_buf_desc rxq_desc; |
537 | int i; | 536 | int i; |
537 | struct device *dev; | ||
538 | 538 | ||
539 | ibmveth_debug_printk("open starting\n"); | 539 | ibmveth_debug_printk("open starting\n"); |
540 | 540 | ||
@@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev) | |||
563 | return -ENOMEM; | 563 | return -ENOMEM; |
564 | } | 564 | } |
565 | 565 | ||
566 | adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev, | 566 | dev = &adapter->vdev->dev; |
567 | |||
568 | adapter->buffer_list_dma = dma_map_single(dev, | ||
567 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); | 569 | adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); |
568 | adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev, | 570 | adapter->filter_list_dma = dma_map_single(dev, |
569 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); | 571 | adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); |
570 | adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev, | 572 | adapter->rx_queue.queue_dma = dma_map_single(dev, |
571 | adapter->rx_queue.queue_addr, | 573 | adapter->rx_queue.queue_addr, |
572 | adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); | 574 | adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); |
573 | 575 | ||
574 | if((dma_mapping_error(adapter->buffer_list_dma) ) || | 576 | if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || |
575 | (dma_mapping_error(adapter->filter_list_dma)) || | 577 | (dma_mapping_error(dev, adapter->filter_list_dma)) || |
576 | (dma_mapping_error(adapter->rx_queue.queue_dma))) { | 578 | (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { |
577 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); | 579 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); |
578 | ibmveth_cleanup(adapter); | 580 | ibmveth_cleanup(adapter); |
579 | napi_disable(&adapter->napi); | 581 | napi_disable(&adapter->napi); |
@@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
645 | adapter->bounce_buffer_dma = | 647 | adapter->bounce_buffer_dma = |
646 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, | 648 | dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, |
647 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); | 649 | netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); |
648 | if (dma_mapping_error(adapter->bounce_buffer_dma)) { | 650 | if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { |
649 | ibmveth_error_printk("unable to map bounce buffer\n"); | 651 | ibmveth_error_printk("unable to map bounce buffer\n"); |
650 | ibmveth_cleanup(adapter); | 652 | ibmveth_cleanup(adapter); |
651 | napi_disable(&adapter->napi); | 653 | napi_disable(&adapter->napi); |
@@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
922 | buf[1] = 0; | 924 | buf[1] = 0; |
923 | } | 925 | } |
924 | 926 | ||
925 | if (dma_mapping_error(data_dma_addr)) { | 927 | if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { |
926 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 928 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
927 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); | 929 | ibmveth_error_printk("tx: unable to map xmit buffer\n"); |
928 | skb_copy_from_linear_data(skb, adapter->bounce_buffer, | 930 | skb_copy_from_linear_data(skb, adapter->bounce_buffer, |
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index b8d0639c1cdf..c46864d626b2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp, | |||
1128 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, | 1128 | msg->data.addr[0] = dma_map_single(port->dev, skb->data, |
1129 | skb->len, DMA_TO_DEVICE); | 1129 | skb->len, DMA_TO_DEVICE); |
1130 | 1130 | ||
1131 | if (dma_mapping_error(msg->data.addr[0])) | 1131 | if (dma_mapping_error(port->dev, msg->data.addr[0])) |
1132 | goto recycle_and_drop; | 1132 | goto recycle_and_drop; |
1133 | 1133 | ||
1134 | msg->dev = port->dev; | 1134 | msg->dev = port->dev; |
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx, | |||
1226 | dma_address = msg->data.addr[0]; | 1226 | dma_address = msg->data.addr[0]; |
1227 | dma_length = msg->data.len[0]; | 1227 | dma_length = msg->data.len[0]; |
1228 | 1228 | ||
1229 | if (!dma_mapping_error(dma_address)) | 1229 | if (!dma_mapping_error(msg->dev, dma_address)) |
1230 | dma_unmap_single(msg->dev, dma_address, dma_length, | 1230 | dma_unmap_single(msg->dev, dma_address, dma_length, |
1231 | DMA_TO_DEVICE); | 1231 | DMA_TO_DEVICE); |
1232 | 1232 | ||
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index f9d6b4dca180..096bca54bcf7 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index aa9528779044..f094ee00c416 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 04d5bc69a6f8..2845a0560b84 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 95e87a2f8896..9bb50e3f8974 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -2,7 +2,7 @@ | |||
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | 3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. |
5 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 5 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
6 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | 6 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. |
7 | * | 7 | * |
8 | * This software is available to you under a choice of one of two | 8 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index ea3a09aaa844..8a8b56135a58 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 2 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | 3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
@@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt) | |||
526 | return -ENOMEM; | 526 | return -ENOMEM; |
527 | priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, | 527 | priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0, |
528 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 528 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
529 | if (pci_dma_mapping_error(priv->eq_table.icm_dma)) { | 529 | if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) { |
530 | __free_page(priv->eq_table.icm_page); | 530 | __free_page(priv->eq_table.icm_page); |
531 | return -ENOMEM; | 531 | return -ENOMEM; |
532 | } | 532 | } |
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 57278224ba1e..7e32955da982 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index fbf0e22be122..decbb5c2ad41 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
4 | * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. | 4 | * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 2a5bef6388fe..baf4bf66062c 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 2 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
3 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 3 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index 6c44edf35847..ab56a2f89b65 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 2 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
3 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 3 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 4a6c4d526f1b..0e7eb1038f9f 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 8e1d24cda1b0..1252a919de2e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | 3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 4 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
5 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 5 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
6 | * | 6 | * |
7 | * This software is available to you under a choice of one of two | 7 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index b4b57870ddfd..c83f88ce0736 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 78038499cff5..5337e3ac3e78 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -2,7 +2,7 @@ | |||
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | 3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. | 4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. |
5 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 5 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
6 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | 6 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. |
7 | * | 7 | * |
8 | * This software is available to you under a choice of one of two | 8 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index a3c04c5f12c2..62071d9c4a55 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 3 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
4 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 4 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index ee5484c44a18..c49a86044bf7 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | 2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | 3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. |
4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. | 4 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
5 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | 5 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. |
6 | * | 6 | * |
7 | * This software is available to you under a choice of one of two | 7 | * This software is available to you under a choice of one of two |
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index e199715fabd0..3951b884c0fb 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index d23f46d692ef..533eb6db24b3 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. | 2 | * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. |
3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. | ||
3 | * | 4 | * |
4 | * This software is available to you under a choice of one of two | 5 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 6 | * licenses. You may choose to be licensed under the terms of the GNU |
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 993d87c9296f..edc0fd588985 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, | |||
650 | mac->bufsz - LOCAL_SKB_ALIGN, | 650 | mac->bufsz - LOCAL_SKB_ALIGN, |
651 | PCI_DMA_FROMDEVICE); | 651 | PCI_DMA_FROMDEVICE); |
652 | 652 | ||
653 | if (unlikely(dma_mapping_error(dma))) { | 653 | if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { |
654 | dev_kfree_skb_irq(info->skb); | 654 | dev_kfree_skb_irq(info->skb); |
655 | break; | 655 | break; |
656 | } | 656 | } |
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1519 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), | 1519 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), |
1520 | PCI_DMA_TODEVICE); | 1520 | PCI_DMA_TODEVICE); |
1521 | map_size[0] = skb_headlen(skb); | 1521 | map_size[0] = skb_headlen(skb); |
1522 | if (dma_mapping_error(map[0])) | 1522 | if (pci_dma_mapping_error(mac->dma_pdev, map[0])) |
1523 | goto out_err_nolock; | 1523 | goto out_err_nolock; |
1524 | 1524 | ||
1525 | for (i = 0; i < nfrags; i++) { | 1525 | for (i = 0; i < nfrags; i++) { |
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | |||
1529 | frag->page_offset, frag->size, | 1529 | frag->page_offset, frag->size, |
1530 | PCI_DMA_TODEVICE); | 1530 | PCI_DMA_TODEVICE); |
1531 | map_size[i+1] = frag->size; | 1531 | map_size[i+1] = frag->size; |
1532 | if (dma_mapping_error(map[i+1])) { | 1532 | if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) { |
1533 | nfrags = i; | 1533 | nfrags = i; |
1534 | goto out_err_nolock; | 1534 | goto out_err_nolock; |
1535 | } | 1535 | } |
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 739b3ab7bccc..ddccc074a76a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -581,12 +581,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
581 | if (file == ppp->owner) | 581 | if (file == ppp->owner) |
582 | ppp_shutdown_interface(ppp); | 582 | ppp_shutdown_interface(ppp); |
583 | } | 583 | } |
584 | if (atomic_read(&file->f_count) <= 2) { | 584 | if (atomic_long_read(&file->f_count) <= 2) { |
585 | ppp_release(NULL, file); | 585 | ppp_release(NULL, file); |
586 | err = 0; | 586 | err = 0; |
587 | } else | 587 | } else |
588 | printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", | 588 | printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", |
589 | atomic_read(&file->f_count)); | 589 | atomic_long_read(&file->f_count)); |
590 | unlock_kernel(); | 590 | unlock_kernel(); |
591 | return err; | 591 | return err; |
592 | } | 592 | } |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e7d48a352beb..e82b37bbd6c3 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, | |||
328 | qdev->lrg_buffer_len - | 328 | qdev->lrg_buffer_len - |
329 | QL_HEADER_SPACE, | 329 | QL_HEADER_SPACE, |
330 | PCI_DMA_FROMDEVICE); | 330 | PCI_DMA_FROMDEVICE); |
331 | err = pci_dma_mapping_error(map); | 331 | err = pci_dma_mapping_error(qdev->pdev, map); |
332 | if(err) { | 332 | if(err) { |
333 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 333 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
334 | qdev->ndev->name, err); | 334 | qdev->ndev->name, err); |
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev) | |||
1919 | QL_HEADER_SPACE, | 1919 | QL_HEADER_SPACE, |
1920 | PCI_DMA_FROMDEVICE); | 1920 | PCI_DMA_FROMDEVICE); |
1921 | 1921 | ||
1922 | err = pci_dma_mapping_error(map); | 1922 | err = pci_dma_mapping_error(qdev->pdev, map); |
1923 | if(err) { | 1923 | if(err) { |
1924 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 1924 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
1925 | qdev->ndev->name, err); | 1925 | qdev->ndev->name, err); |
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2454 | */ | 2454 | */ |
2455 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2455 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2456 | 2456 | ||
2457 | err = pci_dma_mapping_error(map); | 2457 | err = pci_dma_mapping_error(qdev->pdev, map); |
2458 | if(err) { | 2458 | if(err) { |
2459 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 2459 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
2460 | qdev->ndev->name, err); | 2460 | qdev->ndev->name, err); |
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2487 | sizeof(struct oal), | 2487 | sizeof(struct oal), |
2488 | PCI_DMA_TODEVICE); | 2488 | PCI_DMA_TODEVICE); |
2489 | 2489 | ||
2490 | err = pci_dma_mapping_error(map); | 2490 | err = pci_dma_mapping_error(qdev->pdev, map); |
2491 | if(err) { | 2491 | if(err) { |
2492 | 2492 | ||
2493 | printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", | 2493 | printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n", |
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev, | |||
2514 | frag->page_offset, frag->size, | 2514 | frag->page_offset, frag->size, |
2515 | PCI_DMA_TODEVICE); | 2515 | PCI_DMA_TODEVICE); |
2516 | 2516 | ||
2517 | err = pci_dma_mapping_error(map); | 2517 | err = pci_dma_mapping_error(qdev->pdev, map); |
2518 | if(err) { | 2518 | if(err) { |
2519 | printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", | 2519 | printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n", |
2520 | qdev->ndev->name, err); | 2520 | qdev->ndev->name, err); |
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev) | |||
2916 | QL_HEADER_SPACE, | 2916 | QL_HEADER_SPACE, |
2917 | PCI_DMA_FROMDEVICE); | 2917 | PCI_DMA_FROMDEVICE); |
2918 | 2918 | ||
2919 | err = pci_dma_mapping_error(map); | 2919 | err = pci_dma_mapping_error(qdev->pdev, map); |
2920 | if(err) { | 2920 | if(err) { |
2921 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", | 2921 | printk(KERN_ERR "%s: PCI mapping failed with error: %d\n", |
2922 | qdev->ndev->name, err); | 2922 | qdev->ndev->name, err); |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 9dae40ccf048..86d77d05190a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic) | |||
2512 | * Return Value: | 2512 | * Return Value: |
2513 | * SUCCESS on success or an appropriate -ve value on failure. | 2513 | * SUCCESS on success or an appropriate -ve value on failure. |
2514 | */ | 2514 | */ |
2515 | 2515 | static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, | |
2516 | static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | 2516 | int from_card_up) |
2517 | { | 2517 | { |
2518 | struct sk_buff *skb; | 2518 | struct sk_buff *skb; |
2519 | struct RxD_t *rxdp; | 2519 | struct RxD_t *rxdp; |
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2602 | rxdp1->Buffer0_ptr = pci_map_single | 2602 | rxdp1->Buffer0_ptr = pci_map_single |
2603 | (ring->pdev, skb->data, size - NET_IP_ALIGN, | 2603 | (ring->pdev, skb->data, size - NET_IP_ALIGN, |
2604 | PCI_DMA_FROMDEVICE); | 2604 | PCI_DMA_FROMDEVICE); |
2605 | if(pci_dma_mapping_error(rxdp1->Buffer0_ptr)) | 2605 | if (pci_dma_mapping_error(nic->pdev, |
2606 | rxdp1->Buffer0_ptr)) | ||
2606 | goto pci_map_failed; | 2607 | goto pci_map_failed; |
2607 | 2608 | ||
2608 | rxdp->Control_2 = | 2609 | rxdp->Control_2 = |
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2636 | rxdp3->Buffer0_ptr = | 2637 | rxdp3->Buffer0_ptr = |
2637 | pci_map_single(ring->pdev, ba->ba_0, | 2638 | pci_map_single(ring->pdev, ba->ba_0, |
2638 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2639 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
2639 | if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) | 2640 | if (pci_dma_mapping_error(nic->pdev, |
2641 | rxdp3->Buffer0_ptr)) | ||
2640 | goto pci_map_failed; | 2642 | goto pci_map_failed; |
2641 | } else | 2643 | } else |
2642 | pci_dma_sync_single_for_device(ring->pdev, | 2644 | pci_dma_sync_single_for_device(ring->pdev, |
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2655 | (ring->pdev, skb->data, ring->mtu + 4, | 2657 | (ring->pdev, skb->data, ring->mtu + 4, |
2656 | PCI_DMA_FROMDEVICE); | 2658 | PCI_DMA_FROMDEVICE); |
2657 | 2659 | ||
2658 | if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) | 2660 | if (pci_dma_mapping_error(nic->pdev, |
2661 | rxdp3->Buffer2_ptr)) | ||
2659 | goto pci_map_failed; | 2662 | goto pci_map_failed; |
2660 | 2663 | ||
2661 | if (from_card_up) { | 2664 | if (from_card_up) { |
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up) | |||
2664 | ba->ba_1, BUF1_LEN, | 2667 | ba->ba_1, BUF1_LEN, |
2665 | PCI_DMA_FROMDEVICE); | 2668 | PCI_DMA_FROMDEVICE); |
2666 | 2669 | ||
2667 | if (pci_dma_mapping_error | 2670 | if (pci_dma_mapping_error(nic->pdev, |
2668 | (rxdp3->Buffer1_ptr)) { | 2671 | rxdp3->Buffer1_ptr)) { |
2669 | pci_unmap_single | 2672 | pci_unmap_single |
2670 | (ring->pdev, | 2673 | (ring->pdev, |
2671 | (dma_addr_t)(unsigned long) | 2674 | (dma_addr_t)(unsigned long) |
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
2806 | } | 2809 | } |
2807 | } | 2810 | } |
2808 | 2811 | ||
2809 | static int s2io_chk_rx_buffers(struct ring_info *ring) | 2812 | static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring) |
2810 | { | 2813 | { |
2811 | if (fill_rx_buffers(ring, 0) == -ENOMEM) { | 2814 | if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) { |
2812 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); | 2815 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); |
2813 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | 2816 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); |
2814 | } | 2817 | } |
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget) | |||
2848 | return 0; | 2851 | return 0; |
2849 | 2852 | ||
2850 | pkts_processed = rx_intr_handler(ring, budget); | 2853 | pkts_processed = rx_intr_handler(ring, budget); |
2851 | s2io_chk_rx_buffers(ring); | 2854 | s2io_chk_rx_buffers(nic, ring); |
2852 | 2855 | ||
2853 | if (pkts_processed < budget_org) { | 2856 | if (pkts_processed < budget_org) { |
2854 | netif_rx_complete(dev, napi); | 2857 | netif_rx_complete(dev, napi); |
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget) | |||
2882 | for (i = 0; i < config->rx_ring_num; i++) { | 2885 | for (i = 0; i < config->rx_ring_num; i++) { |
2883 | ring = &mac_control->rings[i]; | 2886 | ring = &mac_control->rings[i]; |
2884 | ring_pkts_processed = rx_intr_handler(ring, budget); | 2887 | ring_pkts_processed = rx_intr_handler(ring, budget); |
2885 | s2io_chk_rx_buffers(ring); | 2888 | s2io_chk_rx_buffers(nic, ring); |
2886 | pkts_processed += ring_pkts_processed; | 2889 | pkts_processed += ring_pkts_processed; |
2887 | budget -= ring_pkts_processed; | 2890 | budget -= ring_pkts_processed; |
2888 | if (budget <= 0) | 2891 | if (budget <= 0) |
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev) | |||
2939 | rx_intr_handler(&mac_control->rings[i], 0); | 2942 | rx_intr_handler(&mac_control->rings[i], 0); |
2940 | 2943 | ||
2941 | for (i = 0; i < config->rx_ring_num; i++) { | 2944 | for (i = 0; i < config->rx_ring_num; i++) { |
2942 | if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) { | 2945 | if (fill_rx_buffers(nic, &mac_control->rings[i], 0) == |
2946 | -ENOMEM) { | ||
2943 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2947 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
2944 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); | 2948 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); |
2945 | break; | 2949 | break; |
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) | |||
4235 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, | 4239 | txdp->Buffer_Pointer = pci_map_single(sp->pdev, |
4236 | fifo->ufo_in_band_v, | 4240 | fifo->ufo_in_band_v, |
4237 | sizeof(u64), PCI_DMA_TODEVICE); | 4241 | sizeof(u64), PCI_DMA_TODEVICE); |
4238 | if (pci_dma_mapping_error(txdp->Buffer_Pointer)) | 4242 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) |
4239 | goto pci_map_failed; | 4243 | goto pci_map_failed; |
4240 | txdp++; | 4244 | txdp++; |
4241 | } | 4245 | } |
4242 | 4246 | ||
4243 | txdp->Buffer_Pointer = pci_map_single | 4247 | txdp->Buffer_Pointer = pci_map_single |
4244 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); | 4248 | (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); |
4245 | if (pci_dma_mapping_error(txdp->Buffer_Pointer)) | 4249 | if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) |
4246 | goto pci_map_failed; | 4250 | goto pci_map_failed; |
4247 | 4251 | ||
4248 | txdp->Host_Control = (unsigned long) skb; | 4252 | txdp->Host_Control = (unsigned long) skb; |
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | |||
4345 | netif_rx_schedule(dev, &ring->napi); | 4349 | netif_rx_schedule(dev, &ring->napi); |
4346 | } else { | 4350 | } else { |
4347 | rx_intr_handler(ring, 0); | 4351 | rx_intr_handler(ring, 0); |
4348 | s2io_chk_rx_buffers(ring); | 4352 | s2io_chk_rx_buffers(sp, ring); |
4349 | } | 4353 | } |
4350 | 4354 | ||
4351 | return IRQ_HANDLED; | 4355 | return IRQ_HANDLED; |
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
4826 | */ | 4830 | */ |
4827 | if (!config->napi) { | 4831 | if (!config->napi) { |
4828 | for (i = 0; i < config->rx_ring_num; i++) | 4832 | for (i = 0; i < config->rx_ring_num; i++) |
4829 | s2io_chk_rx_buffers(&mac_control->rings[i]); | 4833 | s2io_chk_rx_buffers(sp, &mac_control->rings[i]); |
4830 | } | 4834 | } |
4831 | writeq(sp->general_int_mask, &bar0->general_int_mask); | 4835 | writeq(sp->general_int_mask, &bar0->general_int_mask); |
4832 | readl(&bar0->general_int_status); | 4836 | readl(&bar0->general_int_status); |
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6859 | pci_map_single( sp->pdev, (*skb)->data, | 6863 | pci_map_single( sp->pdev, (*skb)->data, |
6860 | size - NET_IP_ALIGN, | 6864 | size - NET_IP_ALIGN, |
6861 | PCI_DMA_FROMDEVICE); | 6865 | PCI_DMA_FROMDEVICE); |
6862 | if (pci_dma_mapping_error(rxdp1->Buffer0_ptr)) | 6866 | if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) |
6863 | goto memalloc_failed; | 6867 | goto memalloc_failed; |
6864 | rxdp->Host_Control = (unsigned long) (*skb); | 6868 | rxdp->Host_Control = (unsigned long) (*skb); |
6865 | } | 6869 | } |
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6886 | pci_map_single(sp->pdev, (*skb)->data, | 6890 | pci_map_single(sp->pdev, (*skb)->data, |
6887 | dev->mtu + 4, | 6891 | dev->mtu + 4, |
6888 | PCI_DMA_FROMDEVICE); | 6892 | PCI_DMA_FROMDEVICE); |
6889 | if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) | 6893 | if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) |
6890 | goto memalloc_failed; | 6894 | goto memalloc_failed; |
6891 | rxdp3->Buffer0_ptr = *temp0 = | 6895 | rxdp3->Buffer0_ptr = *temp0 = |
6892 | pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, | 6896 | pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, |
6893 | PCI_DMA_FROMDEVICE); | 6897 | PCI_DMA_FROMDEVICE); |
6894 | if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) { | 6898 | if (pci_dma_mapping_error(sp->pdev, |
6899 | rxdp3->Buffer0_ptr)) { | ||
6895 | pci_unmap_single (sp->pdev, | 6900 | pci_unmap_single (sp->pdev, |
6896 | (dma_addr_t)rxdp3->Buffer2_ptr, | 6901 | (dma_addr_t)rxdp3->Buffer2_ptr, |
6897 | dev->mtu + 4, PCI_DMA_FROMDEVICE); | 6902 | dev->mtu + 4, PCI_DMA_FROMDEVICE); |
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, | |||
6903 | rxdp3->Buffer1_ptr = *temp1 = | 6908 | rxdp3->Buffer1_ptr = *temp1 = |
6904 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, | 6909 | pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, |
6905 | PCI_DMA_FROMDEVICE); | 6910 | PCI_DMA_FROMDEVICE); |
6906 | if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { | 6911 | if (pci_dma_mapping_error(sp->pdev, |
6912 | rxdp3->Buffer1_ptr)) { | ||
6907 | pci_unmap_single (sp->pdev, | 6913 | pci_unmap_single (sp->pdev, |
6908 | (dma_addr_t)rxdp3->Buffer0_ptr, | 6914 | (dma_addr_t)rxdp3->Buffer0_ptr, |
6909 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 6915 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
7187 | 7193 | ||
7188 | for (i = 0; i < config->rx_ring_num; i++) { | 7194 | for (i = 0; i < config->rx_ring_num; i++) { |
7189 | mac_control->rings[i].mtu = dev->mtu; | 7195 | mac_control->rings[i].mtu = dev->mtu; |
7190 | ret = fill_rx_buffers(&mac_control->rings[i], 1); | 7196 | ret = fill_rx_buffers(sp, &mac_control->rings[i], 1); |
7191 | if (ret) { | 7197 | if (ret) { |
7192 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", | 7198 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", |
7193 | dev->name); | 7199 | dev->name); |
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 601b001437c0..0d27dd39bc09 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, | |||
233 | rx_buf->data, rx_buf->len, | 233 | rx_buf->data, rx_buf->len, |
234 | PCI_DMA_FROMDEVICE); | 234 | PCI_DMA_FROMDEVICE); |
235 | 235 | ||
236 | if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) { | 236 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { |
237 | dev_kfree_skb_any(rx_buf->skb); | 237 | dev_kfree_skb_any(rx_buf->skb); |
238 | rx_buf->skb = NULL; | 238 | rx_buf->skb = NULL; |
239 | return -EIO; | 239 | return -EIO; |
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
275 | 0, efx_rx_buf_size(efx), | 275 | 0, efx_rx_buf_size(efx), |
276 | PCI_DMA_FROMDEVICE); | 276 | PCI_DMA_FROMDEVICE); |
277 | 277 | ||
278 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | 278 | if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) { |
279 | __free_pages(rx_buf->page, efx->rx_buffer_order); | 279 | __free_pages(rx_buf->page, efx->rx_buffer_order); |
280 | rx_buf->page = NULL; | 280 | rx_buf->page = NULL; |
281 | return -EIO; | 281 | return -EIO; |
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5cdd082ab8f6..5e8374ab28ee 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | |||
172 | 172 | ||
173 | /* Process all fragments */ | 173 | /* Process all fragments */ |
174 | while (1) { | 174 | while (1) { |
175 | if (unlikely(pci_dma_mapping_error(dma_addr))) | 175 | if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) |
176 | goto pci_err; | 176 | goto pci_err; |
177 | 177 | ||
178 | /* Store fields for marking in the per-fragment final | 178 | /* Store fields for marking in the per-fragment final |
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) | |||
661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, | 661 | tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, |
662 | TSOH_BUFFER(tsoh), header_len, | 662 | TSOH_BUFFER(tsoh), header_len, |
663 | PCI_DMA_TODEVICE); | 663 | PCI_DMA_TODEVICE); |
664 | if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) { | 664 | if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, |
665 | tsoh->dma_addr))) { | ||
665 | kfree(tsoh); | 666 | kfree(tsoh); |
666 | return NULL; | 667 | return NULL; |
667 | } | 668 | } |
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, | |||
863 | 864 | ||
864 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, | 865 | st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off, |
865 | len, PCI_DMA_TODEVICE); | 866 | len, PCI_DMA_TODEVICE); |
866 | if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) { | 867 | if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) { |
867 | st->ifc.unmap_len = len; | 868 | st->ifc.unmap_len = len; |
868 | st->ifc.len = len; | 869 | st->ifc.len = len; |
869 | st->ifc.dma_addr = st->ifc.unmap_addr; | 870 | st->ifc.dma_addr = st->ifc.unmap_addr; |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 00aa0b108cb9..b6435d0d71f9 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, | |||
452 | /* iommu-map the skb */ | 452 | /* iommu-map the skb */ |
453 | buf = pci_map_single(card->pdev, descr->skb->data, | 453 | buf = pci_map_single(card->pdev, descr->skb->data, |
454 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); | 454 | SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); |
455 | if (pci_dma_mapping_error(buf)) { | 455 | if (pci_dma_mapping_error(card->pdev, buf)) { |
456 | dev_kfree_skb_any(descr->skb); | 456 | dev_kfree_skb_any(descr->skb); |
457 | descr->skb = NULL; | 457 | descr->skb = NULL; |
458 | if (netif_msg_rx_err(card) && net_ratelimit()) | 458 | if (netif_msg_rx_err(card) && net_ratelimit()) |
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, | |||
691 | unsigned long flags; | 691 | unsigned long flags; |
692 | 692 | ||
693 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); | 693 | buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); |
694 | if (pci_dma_mapping_error(buf)) { | 694 | if (pci_dma_mapping_error(card->pdev, buf)) { |
695 | if (netif_msg_tx_err(card) && net_ratelimit()) | 695 | if (netif_msg_tx_err(card) && net_ratelimit()) |
696 | dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " | 696 | dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " |
697 | "Dropping packet\n", skb->data, skb->len); | 697 | "Dropping packet\n", skb->data, skb->len); |
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index a645e5028c14..8487ace9d2e3 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle) | |||
506 | return NULL; | 506 | return NULL; |
507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, | 507 | *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, |
508 | PCI_DMA_FROMDEVICE); | 508 | PCI_DMA_FROMDEVICE); |
509 | if (pci_dma_mapping_error(*dma_handle)) { | 509 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { |
510 | free_page((unsigned long)buf); | 510 | free_page((unsigned long)buf); |
511 | return NULL; | 511 | return NULL; |
512 | } | 512 | } |
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, | |||
536 | return NULL; | 536 | return NULL; |
537 | *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, | 537 | *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE, |
538 | PCI_DMA_FROMDEVICE); | 538 | PCI_DMA_FROMDEVICE); |
539 | if (pci_dma_mapping_error(*dma_handle)) { | 539 | if (pci_dma_mapping_error(hwdev, *dma_handle)) { |
540 | dev_kfree_skb_any(skb); | 540 | dev_kfree_skb_any(skb); |
541 | return NULL; | 541 | return NULL; |
542 | } | 542 | } |
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 217d506527a9..d9769c527346 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | |||
1166 | bf->skb = skb; | 1166 | bf->skb = skb; |
1167 | bf->skbaddr = pci_map_single(sc->pdev, | 1167 | bf->skbaddr = pci_map_single(sc->pdev, |
1168 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); | 1168 | skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); |
1169 | if (unlikely(pci_dma_mapping_error(bf->skbaddr))) { | 1169 | if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) { |
1170 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); | 1170 | ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); |
1171 | dev_kfree_skb(skb); | 1171 | dev_kfree_skb(skb); |
1172 | bf->skb = NULL; | 1172 | bf->skb = NULL; |
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) | |||
1918 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " | 1918 | ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " |
1919 | "skbaddr %llx\n", skb, skb->data, skb->len, | 1919 | "skbaddr %llx\n", skb, skb->data, skb->len, |
1920 | (unsigned long long)bf->skbaddr); | 1920 | (unsigned long long)bf->skbaddr); |
1921 | if (pci_dma_mapping_error(bf->skbaddr)) { | 1921 | if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { |
1922 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); | 1922 | ATH5K_ERR(sc, "beacon DMA mapping failed\n"); |
1923 | return -EIO; | 1923 | return -EIO; |
1924 | } | 1924 | } |