path: root/drivers/ntb
author	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>	2013-10-18 13:35:31 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2013-11-14 14:01:32 -0500
commit	6f57fd0578dff23a4bd16118f0cb4201bcec91f1 (patch)
tree	093a0b9efd8255874008a641582e53fe458aa1ff /drivers/ntb
parent	1786b943dad0b2f655e69b3ad5187f7e39ef32e6 (diff)
NTB: convert to dmaengine_unmap_data
Use the generic unmap object to unmap dma buffers.

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Jon Mason <jon.mason@intel.com>
[djbw: fix up unmap len, and GFP flags]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
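For reference, a minimal sketch (not part of this patch) of the dmaengine_unmap_data pattern the conversion below follows: one refcounted unmap object carries both mappings, the descriptor takes its own reference via dma_set_unmap(), and dmaengine_unmap_put() drops references, with the actual unmapping done when the last reference goes away. The function name sketch_dma_copy() and its return codes are hypothetical; the real driver keeps its own queueing, completion callbacks, and CPU-copy fallback.

/*
 * Hypothetical illustration of the dmaengine_unmap_data usage pattern.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int sketch_dma_copy(struct dma_chan *chan, void *dst, void *src,
			   size_t len)
{
	struct dma_device *device = chan->device;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* Room for two addresses: addr[0] = source, addr[1] = destination. */
	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      offset_in_page(src), len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_put;
	unmap->to_cnt = 1;	/* to-device entries come first in addr[] */

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(dst),
				      offset_in_page(dst), len,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_put;
	unmap->from_cnt = 1;

	/* The unmap object owns unmapping now, so tell the engine to skip it. */
	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_COMPL_SKIP_SRC_UNMAP |
					     DMA_COMPL_SKIP_DEST_UNMAP |
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_put;

	dma_set_unmap(txd, unmap);	/* descriptor grabs its own reference */

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie)) {
		dmaengine_unmap_put(unmap);	/* drop the descriptor's ref */
		goto err_put;
	}

	/* The submitter's reference is no longer needed; the descriptor's
	 * reference keeps the mappings alive until completion. */
	dmaengine_unmap_put(unmap);
	dma_async_issue_pending(chan);
	return 0;

err_put:
	dmaengine_unmap_put(unmap);
	return -EIO;
}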
Diffstat (limited to 'drivers/ntb')
-rw-r--r--	drivers/ntb/ntb_transport.c	85
1 file changed, 58 insertions(+), 27 deletions(-)
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 12a9e83c008b..222c2baa3a4b 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -1034,7 +1034,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
 	size_t pay_off, buff_off;
-	dma_addr_t src, dest;
+	struct dmaengine_unmap_data *unmap;
 	dma_cookie_t cookie;
 	void *buf = entry->buf;
 	unsigned long flags;
@@ -1045,35 +1045,50 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 		goto err;
 
 	if (len < copy_bytes)
-		goto err1;
+		goto err_wait;
 
 	device = chan->device;
 	pay_off = (size_t) offset & ~PAGE_MASK;
 	buff_off = (size_t) buf & ~PAGE_MASK;
 
 	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
-		goto err1;
+		goto err_wait;
 
-	dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
-	if (dma_mapping_error(device->dev, dest))
-		goto err1;
+	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+	if (!unmap)
+		goto err_wait;
 
-	src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev, src))
-		goto err2;
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
+				      pay_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
 
-	flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
+	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[1]))
+		goto err_get_unmap;
+
+	unmap->from_cnt = 1;
+
+	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
 		DMA_PREP_INTERRUPT;
-	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len, flags);
 	if (!txd)
-		goto err3;
+		goto err_get_unmap;
 
 	txd->callback = ntb_rx_copy_callback;
 	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
 
 	cookie = dmaengine_submit(txd);
 	if (dma_submit_error(cookie))
-		goto err3;
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
 
 	qp->last_cookie = cookie;
 
@@ -1081,11 +1096,11 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
 
 	return;
 
-err3:
-	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
-err2:
-	dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
-err1:
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+err_wait:
 	/* If the callbacks come out of order, the writing of the index to the
 	 * last completed will be out of order. This may result in the
 	 * receive stalling forever.
@@ -1245,7 +1260,8 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 	struct dma_chan *chan = qp->dma_chan;
 	struct dma_device *device;
 	size_t dest_off, buff_off;
-	dma_addr_t src, dest;
+	struct dmaengine_unmap_data *unmap;
+	dma_addr_t dest;
 	dma_cookie_t cookie;
 	void __iomem *offset;
 	size_t len = entry->len;
@@ -1273,28 +1289,43 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
 	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
 		goto err;
 
-	src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(device->dev, src))
+	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+	if (!unmap)
 		goto err;
 
-	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
-	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	flags = DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP |
+		DMA_PREP_INTERRUPT;
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     flags);
 	if (!txd)
-		goto err1;
+		goto err_get_unmap;
 
 	txd->callback = ntb_tx_copy_callback;
 	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
 
 	cookie = dmaengine_submit(txd);
 	if (dma_submit_error(cookie))
-		goto err1;
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
 
 	dma_async_issue_pending(chan);
 	qp->tx_async++;
 
 	return;
-err1:
-	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
 err:
 	ntb_memcpy_tx(entry, offset);
 	qp->tx_memcpy++;