Diffstat (limited to 'drivers/infiniband/core/mad.c')

 -rw-r--r--  drivers/infiniband/core/mad.c | 90
 1 file changed, 44 insertions(+), 46 deletions(-)
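The change replaces mad.c's direct calls to dma_map_single()/dma_unmap_single() on device->dma_device, together with the pci_unmap_addr_set()/pci_unmap_addr() bookkeeping macros, with the ib_dma_map_single()/ib_dma_unmap_single() wrappers, which take the struct ib_device itself and store the DMA address in ordinary struct fields (header_mapping, payload_mapping, header.mapping).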
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 15f38d94b3a..5ed141ebd1c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 
 	mad_agent = mad_send_wr->send_buf.mad_agent;
 	sge = mad_send_wr->sg_list;
-	sge[0].addr = dma_map_single(mad_agent->device->dma_device,
-				     mad_send_wr->send_buf.mad,
-				     sge[0].length,
-				     DMA_TO_DEVICE);
-	pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
+	sge[0].addr = ib_dma_map_single(mad_agent->device,
+					mad_send_wr->send_buf.mad,
+					sge[0].length,
+					DMA_TO_DEVICE);
+	mad_send_wr->header_mapping = sge[0].addr;
 
-	sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-				     ib_get_payload(mad_send_wr),
-				     sge[1].length,
-				     DMA_TO_DEVICE);
-	pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+	sge[1].addr = ib_dma_map_single(mad_agent->device,
+					ib_get_payload(mad_send_wr),
+					sge[1].length,
+					DMA_TO_DEVICE);
+	mad_send_wr->payload_mapping = sge[1].addr;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
 	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,12 +1026,12 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	}
 	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
 	if (ret) {
-		dma_unmap_single(mad_agent->device->dma_device,
-				 pci_unmap_addr(mad_send_wr, header_mapping),
-				 sge[0].length, DMA_TO_DEVICE);
-		dma_unmap_single(mad_agent->device->dma_device,
-				 pci_unmap_addr(mad_send_wr, payload_mapping),
-				 sge[1].length, DMA_TO_DEVICE);
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->header_mapping,
+				    sge[0].length, DMA_TO_DEVICE);
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->payload_mapping,
+				    sge[1].length, DMA_TO_DEVICE);
 	}
 	return ret;
 }
@@ -1850,11 +1850,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
 				    mad_list);
 	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
-	dma_unmap_single(port_priv->device->dma_device,
-			 pci_unmap_addr(&recv->header, mapping),
-			 sizeof(struct ib_mad_private) -
-			 sizeof(struct ib_mad_private_header),
-			 DMA_FROM_DEVICE);
+	ib_dma_unmap_single(port_priv->device,
+			    recv->header.mapping,
+			    sizeof(struct ib_mad_private) -
+			    sizeof(struct ib_mad_private_header),
+			    DMA_FROM_DEVICE);
 
 	/* Setup MAD receive work completion from "normal" work completion */
 	recv->header.wc = *wc;
@@ -2080,12 +2080,12 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
 	qp_info = send_queue->qp_info;
 
 retry:
-	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-			 pci_unmap_addr(mad_send_wr, header_mapping),
-			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
-	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-			 pci_unmap_addr(mad_send_wr, payload_mapping),
-			 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+			    mad_send_wr->header_mapping,
+			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+			    mad_send_wr->payload_mapping,
+			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
 	queued_send_wr = NULL;
 	spin_lock_irqsave(&send_queue->lock, flags);
 	list_del(&mad_list->list);
@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 				break;
 			}
 		}
-		sg_list.addr = dma_map_single(qp_info->port_priv->
-					      device->dma_device,
-					      &mad_priv->grh,
-					      sizeof *mad_priv -
-					      sizeof mad_priv->header,
-					      DMA_FROM_DEVICE);
-		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+						 &mad_priv->grh,
+						 sizeof *mad_priv -
+						 sizeof mad_priv->header,
+						 DMA_FROM_DEVICE);
+		mad_priv->header.mapping = sg_list.addr;
 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 		mad_priv->header.mad_list.mad_queue = recv_queue;
 
@@ -2549,12 +2548,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 			list_del(&mad_priv->header.mad_list.list);
 			recv_queue->count--;
 			spin_unlock_irqrestore(&recv_queue->lock, flags);
-			dma_unmap_single(qp_info->port_priv->device->dma_device,
-					 pci_unmap_addr(&mad_priv->header,
-							mapping),
-					 sizeof *mad_priv -
-					 sizeof mad_priv->header,
-					 DMA_FROM_DEVICE);
+			ib_dma_unmap_single(qp_info->port_priv->device,
+					    mad_priv->header.mapping,
+					    sizeof *mad_priv -
+					    sizeof mad_priv->header,
+					    DMA_FROM_DEVICE);
 			kmem_cache_free(ib_mad_cache, mad_priv);
 			printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret);
 			break;
@@ -2586,11 +2584,11 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
 		/* Remove from posted receive MAD list */
 		list_del(&mad_list->list);
 
-		dma_unmap_single(qp_info->port_priv->device->dma_device,
-				 pci_unmap_addr(&recv->header, mapping),
-				 sizeof(struct ib_mad_private) -
-				 sizeof(struct ib_mad_private_header),
-				 DMA_FROM_DEVICE);
+		ib_dma_unmap_single(qp_info->port_priv->device,
+				    recv->header.mapping,
+				    sizeof(struct ib_mad_private) -
+				    sizeof(struct ib_mad_private_header),
+				    DMA_FROM_DEVICE);
 		kmem_cache_free(ib_mad_cache, recv);
 	}
 
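As context for the pattern above: each ib_dma_map_single() is paired with an ib_dma_unmap_single() on the same struct ib_device, with the returned u64 saved for the later unmap. Below is a minimal sketch of that idiom; example_map()/example_unmap() are hypothetical helpers for illustration, and the ib_dma_mapping_error() check is an assumption about caller policy, not something this diff adds.

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical helpers illustrating the ib_dma_* idiom adopted above;
 * not code from mad.c.
 */
static int example_map(struct ib_device *dev, void *buf, size_t len,
		       u64 *mapping)
{
	u64 addr;

	/* The wrapper takes the ib_device, not device->dma_device. */
	addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, addr))	/* assumed caller policy */
		return -ENOMEM;

	*mapping = addr;	/* plain u64 field replaces pci_unmap_addr_set() */
	return 0;
}

static void example_unmap(struct ib_device *dev, u64 mapping, size_t len)
{
	/* Same device, saved address, same length and direction. */
	ib_dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
}

Routing the mapping through struct ib_device lets a driver interpose its own DMA handling instead of always going through device->dma_device, while behavior for ordinary hardware stays the same.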