author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2008-07-25 22:44:49 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-26 15:00:03 -0400
commit	8d8bb39b9eba32dd70e87fd5ad5c5dd4ba118e06 (patch)
tree	64090a84f4c4466f9f30ff46c993e0cede379052 /drivers/net/ibmveth.c
parent	c485b465a031b6f9b9a51300e0ee1f86efc6db87 (diff)
dma-mapping: add the device argument to dma_mapping_error()
Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER architecture does. This enables us to cleanly fix the Calgary IOMMU issue that some devices are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).

I think that per-device dma_mapping_ops support would also be helpful for KVM people to support PCI passthrough, but Andi thinks that this makes it difficult to support PCI passthrough (see the above thread). So I CC'ed this to the KVM camp. Comments are appreciated.

A pointer to dma_mapping_ops is added to struct dev_archdata. If the pointer is non-NULL, the DMA operations in asm/dma-mapping.h use it; if it is NULL, the system-wide dma_ops pointer is used as before.

If it's useful for KVM people, I plan to implement a mechanism to register a hook called when a new PCI (or DMA-capable) device is created (it works with hot plugging). It enables IOMMUs to set up an appropriate dma_mapping_ops per device.

The major obstacle is that dma_mapping_error doesn't take a pointer to the device, unlike the other DMA operations, so x86 can't have dma_mapping_ops per device. Note that all the POWER IOMMUs use the same dma_mapping_error function, so this is not a problem for POWER, but x86 IOMMUs use different dma_mapping_error functions.

The first patch adds the device argument to dma_mapping_error. The patch is trivial but large since it touches lots of drivers and dma-mapping.h in all architectures.

This patch:

dma_mapping_error() doesn't take a pointer to the device, unlike the other DMA operations, so we can't have dma_mapping_ops per device. Note that POWER already has dma_mapping_ops per device, but all the POWER IOMMUs use the same dma_mapping_error function; x86 IOMMUs use different dma_mapping_error functions, so they need the device argument.

[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
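As context for the tree-wide change below, here is a minimal sketch of the per-device dispatch the message describes. This is a simplified assumption of the shape, not the patched x86 header verbatim; the struct layout, get_dma_ops() helper, and the extern names are illustrative:

	/* Sketch only: simplified shape of the per-device dispatch
	 * described above, not the exact patched x86 code. Assumes this
	 * commit's addition of a dma_mapping_ops pointer to
	 * struct dev_archdata. */
	#include <linux/device.h>
	#include <linux/types.h>

	struct dma_mapping_ops {
		int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
		/* map_single, unmap_single, ... elided */
	};

	extern struct dma_mapping_ops *dma_ops;	/* system-wide default */
	extern dma_addr_t bad_dma_address;	/* legacy global error cookie */

	static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
	{
		/* Prefer the per-device ops installed by an IOMMU, if any. */
		if (dev && dev->archdata.dma_ops)
			return dev->archdata.dma_ops;
		return dma_ops;
	}

	static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
	{
		struct dma_mapping_ops *ops = get_dma_ops(dev);

		/* The device argument is what makes this dispatch possible:
		 * without it, only a single system-wide check could run here. */
		if (ops->mapping_error)
			return ops->mapping_error(dev, dma_addr);

		return dma_addr == bad_dma_address;
	}

Without the device argument, the last function could only compare against one global error value, which is exactly why each driver call site below must now pass its struct device.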
Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r--	drivers/net/ibmveth.c	38
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index e5a6e2e84540..91ec9fdc7184 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
 				pool->buff_size, DMA_FROM_DEVICE);
 
-		if (dma_mapping_error(dma_addr))
+		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
 			goto failure;
 
 		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
@@ -294,7 +294,7 @@ failure:
 		pool->consumer_index = pool->size - 1;
 	else
 		pool->consumer_index--;
-	if (!dma_mapping_error(dma_addr))
+	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
 		dma_unmap_single(&adapter->vdev->dev,
 				 pool->dma_addr[index], pool->buff_size,
 				 DMA_FROM_DEVICE);
@@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
 	int i;
+	struct device *dev = &adapter->vdev->dev;
 
 	if(adapter->buffer_list_addr != NULL) {
-		if(!dma_mapping_error(adapter->buffer_list_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					adapter->buffer_list_dma, 4096,
+		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
 			adapter->buffer_list_dma = DMA_ERROR_CODE;
 		}
@@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 
 	if(adapter->filter_list_addr != NULL) {
-		if(!dma_mapping_error(adapter->filter_list_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					adapter->filter_list_dma, 4096,
+		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
 					DMA_BIDIRECTIONAL);
 			adapter->filter_list_dma = DMA_ERROR_CODE;
 		}
@@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 	}
 
 	if(adapter->rx_queue.queue_addr != NULL) {
-		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
+		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+			dma_unmap_single(dev,
 					adapter->rx_queue.queue_dma,
 					adapter->rx_queue.queue_len,
 					DMA_BIDIRECTIONAL);
@@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
 	int rc;
 	union ibmveth_buf_desc rxq_desc;
 	int i;
+	struct device *dev;
 
 	ibmveth_debug_printk("open starting\n");
 
@@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
 		return -ENOMEM;
 	}
 
-	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+	dev = &adapter->vdev->dev;
+
+	adapter->buffer_list_dma = dma_map_single(dev,
 			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+	adapter->filter_list_dma = dma_map_single(dev,
 			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+	adapter->rx_queue.queue_dma = dma_map_single(dev,
 			adapter->rx_queue.queue_addr,
 			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
-	if((dma_mapping_error(adapter->buffer_list_dma)) ||
-	   (dma_mapping_error(adapter->filter_list_dma)) ||
-	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
 		ibmveth_error_printk("unable to map filter or buffer list pages\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
@@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->bounce_buffer_dma =
 		dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			       netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 		ibmveth_error_printk("unable to map bounce buffer\n");
 		ibmveth_cleanup(adapter);
 		napi_disable(&adapter->napi);
@@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		buf[1] = 0;
 	}
 
-	if (dma_mapping_error(data_dma_addr)) {
+	if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
 		if (!firmware_has_feature(FW_FEATURE_CMO))
 			ibmveth_error_printk("tx: unable to map xmit buffer\n");
 		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
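Viewed together, the hunks above all apply one idiom. As a hedged summary (identifiers as in the driver, surrounding logic elided, not a verbatim excerpt): map through a struct device, validate the handle with the new two-argument dma_mapping_error(), and unmap through the same device on the cleanup path.

	struct device *dev = &adapter->vdev->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, skb->data, pool->buff_size,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr))	/* new: check via the device */
		goto failure;

	/* ... later, on teardown: unmap only handles that mapped cleanly */
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

Caching &adapter->vdev->dev in a local dev pointer, as ibmveth_cleanup() and ibmveth_open() now do, keeps the repeated map/check/unmap calls short and guarantees every call in a function consults the same device's dma_mapping_ops.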