author    Ralph Campbell <ralph.campbell@qlogic.com>  2006-12-12 17:28:30 -0500
committer Roland Dreier <rolandd@cisco.com>           2006-12-12 17:28:30 -0500
commit    1527106ff8cf6afb15f68c8820605a0d32263173 (patch)
tree      eda9162aca8ffb1acddb6c86f561c40dfe69dd33 /drivers/infiniband
parent    f2cbb660ed37294e3eeb98c045de6890079ccb01 (diff)
IB/core: Use the new verbs DMA mapping functions
Convert code in core/ to use the new DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/mad.c         90
-rw-r--r--  drivers/infiniband/core/mad_priv.h     6
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c  12
3 files changed, 53 insertions(+), 55 deletions(-)
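All of the hunks below apply the same mechanical conversion: calls that reached through device->dma_device into the generic DMA API become calls to the ib_dma_* wrappers, which take the struct ib_device itself, and the DECLARE_PCI_UNMAP_ADDR()/pci_unmap_addr() bookkeeping is replaced by plain u64 mapping fields. As a rough sketch of the before/after shape (not part of this patch; buf, len, and the error handling are illustrative only):

	/* Illustrative only -- buf and len are made-up names, and the
	 * ib_dma_mapping_error() check does not appear in the hunks below.
	 */
	u64 mapping;

	/* old: mapping = dma_map_single(dev->dma_device, buf, len, DMA_TO_DEVICE); */
	mapping = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, mapping))
		return -ENOMEM;

	/* ... post the work request using "mapping" as the SGE address ... */

	ib_dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);

The point of the wrapper layer is to let a device driver interpose on DMA mapping for kernel verbs consumers instead of always going straight to the generic DMA API.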
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 15f38d94b3a8..5ed141ebd1c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
        mad_agent = mad_send_wr->send_buf.mad_agent;
        sge = mad_send_wr->sg_list;
-       sge[0].addr = dma_map_single(mad_agent->device->dma_device,
-                                    mad_send_wr->send_buf.mad,
-                                    sge[0].length,
-                                    DMA_TO_DEVICE);
-       pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
+       sge[0].addr = ib_dma_map_single(mad_agent->device,
+                                       mad_send_wr->send_buf.mad,
+                                       sge[0].length,
+                                       DMA_TO_DEVICE);
+       mad_send_wr->header_mapping = sge[0].addr;
 
-       sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-                                    ib_get_payload(mad_send_wr),
-                                    sge[1].length,
-                                    DMA_TO_DEVICE);
-       pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+       sge[1].addr = ib_dma_map_single(mad_agent->device,
+                                       ib_get_payload(mad_send_wr),
+                                       sge[1].length,
+                                       DMA_TO_DEVICE);
+       mad_send_wr->payload_mapping = sge[1].addr;
 
        spin_lock_irqsave(&qp_info->send_queue.lock, flags);
        if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
        }
        spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
        if (ret) {
-               dma_unmap_single(mad_agent->device->dma_device,
-                                pci_unmap_addr(mad_send_wr, header_mapping),
-                                sge[0].length, DMA_TO_DEVICE);
-               dma_unmap_single(mad_agent->device->dma_device,
-                                pci_unmap_addr(mad_send_wr, payload_mapping),
-                                sge[1].length, DMA_TO_DEVICE);
+               ib_dma_unmap_single(mad_agent->device,
+                                   mad_send_wr->header_mapping,
+                                   sge[0].length, DMA_TO_DEVICE);
+               ib_dma_unmap_single(mad_agent->device,
+                                   mad_send_wr->payload_mapping,
+                                   sge[1].length, DMA_TO_DEVICE);
        }
        return ret;
 }
@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
        mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
                                    mad_list);
        recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
-       dma_unmap_single(port_priv->device->dma_device,
-                        pci_unmap_addr(&recv->header, mapping),
-                        sizeof(struct ib_mad_private) -
-                        sizeof(struct ib_mad_private_header),
-                        DMA_FROM_DEVICE);
+       ib_dma_unmap_single(port_priv->device,
+                           recv->header.mapping,
+                           sizeof(struct ib_mad_private) -
+                           sizeof(struct ib_mad_private_header),
+                           DMA_FROM_DEVICE);
 
        /* Setup MAD receive work completion from "normal" work completion */
        recv->header.wc = *wc;
@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
        qp_info = send_queue->qp_info;
 
 retry:
-       dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                        pci_unmap_addr(mad_send_wr, header_mapping),
-                        mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
-       dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-                        pci_unmap_addr(mad_send_wr, payload_mapping),
-                        mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+       ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                           mad_send_wr->header_mapping,
+                           mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+       ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+                           mad_send_wr->payload_mapping,
+                           mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
        queued_send_wr = NULL;
        spin_lock_irqsave(&send_queue->lock, flags);
        list_del(&mad_list->list);
@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                break;
                        }
                }
-               sg_list.addr = dma_map_single(qp_info->port_priv->
-                                               device->dma_device,
-                                             &mad_priv->grh,
-                                             sizeof *mad_priv -
-                                               sizeof mad_priv->header,
-                                             DMA_FROM_DEVICE);
-               pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+               sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+                                                &mad_priv->grh,
+                                                sizeof *mad_priv -
+                                                sizeof mad_priv->header,
+                                                DMA_FROM_DEVICE);
+               mad_priv->header.mapping = sg_list.addr;
                recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
                mad_priv->header.mad_list.mad_queue = recv_queue;
 
@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                        list_del(&mad_priv->header.mad_list.list);
                        recv_queue->count--;
                        spin_unlock_irqrestore(&recv_queue->lock, flags);
-                       dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                        pci_unmap_addr(&mad_priv->header,
-                                                       mapping),
-                                        sizeof *mad_priv -
-                                          sizeof mad_priv->header,
-                                        DMA_FROM_DEVICE);
+                       ib_dma_unmap_single(qp_info->port_priv->device,
+                                           mad_priv->header.mapping,
+                                           sizeof *mad_priv -
+                                           sizeof mad_priv->header,
+                                           DMA_FROM_DEVICE);
                        kmem_cache_free(ib_mad_cache, mad_priv);
                        printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
                        break;
@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
                /* Remove from posted receive MAD list */
                list_del(&mad_list->list);
 
-               dma_unmap_single(qp_info->port_priv->device->dma_device,
-                                pci_unmap_addr(&recv->header, mapping),
-                                sizeof(struct ib_mad_private) -
-                                sizeof(struct ib_mad_private_header),
-                                DMA_FROM_DEVICE);
+               ib_dma_unmap_single(qp_info->port_priv->device,
+                                   recv->header.mapping,
+                                   sizeof(struct ib_mad_private) -
+                                   sizeof(struct ib_mad_private_header),
+                                   DMA_FROM_DEVICE);
                kmem_cache_free(ib_mad_cache, recv);
        }
 
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d5548e73e068..de89717f49fe 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -73,7 +73,7 @@ struct ib_mad_private_header {
        struct ib_mad_list_head mad_list;
        struct ib_mad_recv_wc recv_wc;
        struct ib_wc wc;
-       DECLARE_PCI_UNMAP_ADDR(mapping)
+       u64 mapping;
 } __attribute__ ((packed));
 
 struct ib_mad_private {
@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
        struct list_head agent_list;
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_buf send_buf;
-       DECLARE_PCI_UNMAP_ADDR(header_mapping)
-       DECLARE_PCI_UNMAP_ADDR(payload_mapping)
+       u64 header_mapping;
+       u64 payload_mapping;
        struct ib_send_wr send_wr;
        struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
        __be64 tid;
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index db12cc0841df..c95fe952abd5 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
        int i;
 
        list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-               dma_unmap_sg(dev->dma_device, chunk->page_list,
-                            chunk->nents, DMA_BIDIRECTIONAL);
+               ib_dma_unmap_sg(dev, chunk->page_list,
+                               chunk->nents, DMA_BIDIRECTIONAL);
                for (i = 0; i < chunk->nents; ++i) {
                        if (umem->writable && dirty)
                                set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
                                chunk->page_list[i].length = PAGE_SIZE;
                        }
 
-                       chunk->nmap = dma_map_sg(dev->dma_device,
-                                                &chunk->page_list[0],
-                                                chunk->nents,
-                                                DMA_BIDIRECTIONAL);
+                       chunk->nmap = ib_dma_map_sg(dev,
+                                                   &chunk->page_list[0],
+                                                   chunk->nents,
+                                                   DMA_BIDIRECTIONAL);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(chunk->page_list[i].page);
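The uverbs_mem.c hunks above apply the same substitution on the scatter/gather path: dma_map_sg(dev->dma_device, ...) becomes ib_dma_map_sg(dev, ...). A minimal sketch of the resulting usage, assuming an already-populated scatterlist (sg and nents are illustrative names; the <= 0 check mirrors the one in ib_umem_get() above):

	/* Illustrative only -- map a consumer-built page list through the
	 * ib_dma_* wrappers and undo the mapping on the teardown path.
	 */
	int nmap;

	nmap = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (nmap <= 0)
		return -ENOMEM;

	/* ... hand the nmap mapped entries to the device ... */

	ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);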