Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
 -rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 187
 1 file changed, 120 insertions(+), 67 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 54a15b5d336d..ec8993a7b3be 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -687,7 +687,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
 			     int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
 	sg->addr = dma;
 	sg->length = ALIGN(sizeof(u64) * n, 64);
@@ -715,7 +715,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
 static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 			       struct ib_send_wr *wr, u32 key)
 {
-	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+	struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
 	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
 	wr->opcode = MLX5_IB_WR_UMR;
@@ -752,7 +752,8 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
 	struct mlx5_ib_umr_context umr_context;
-	struct ib_send_wr wr, *bad;
+	struct mlx5_umr_wr umrwr;
+	struct ib_send_wr *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
 	int size;
@@ -798,14 +799,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 		goto free_pas;
 	}
 
-	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)&umr_context;
-	prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
-			 virt_addr, len, access_flags);
+	memset(&umrwr, 0, sizeof(umrwr));
+	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+			 page_shift, virt_addr, len, access_flags);
 
 	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	err = ib_post_send(umrc->qp, &wr, &bad);
+	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 	if (err) {
 		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
 		goto unmap_dma;
@@ -851,8 +852,8 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 	int size;
 	__be64 *pas;
 	dma_addr_t dma;
-	struct ib_send_wr wr, *bad;
-	struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
+	struct ib_send_wr *bad;
+	struct mlx5_umr_wr wr;
 	struct ib_sge sg;
 	int err = 0;
 	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
@@ -917,26 +918,26 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 	dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)&umr_context;
+	wr.wr.wr_id = (u64)(unsigned long)&umr_context;
 
 	sg.addr = dma;
 	sg.length = ALIGN(npages * sizeof(u64),
 			MLX5_UMR_MTT_ALIGNMENT);
 	sg.lkey = dev->umrc.pd->local_dma_lkey;
 
-	wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
-			MLX5_IB_SEND_UMR_UPDATE_MTT;
-	wr.sg_list = &sg;
-	wr.num_sge = 1;
-	wr.opcode = MLX5_IB_WR_UMR;
-	umrwr->npages = sg.length / sizeof(u64);
-	umrwr->page_shift = PAGE_SHIFT;
-	umrwr->mkey = mr->mmr.key;
-	umrwr->target.offset = start_page_index;
+	wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
+			   MLX5_IB_SEND_UMR_UPDATE_MTT;
+	wr.wr.sg_list = &sg;
+	wr.wr.num_sge = 1;
+	wr.wr.opcode = MLX5_IB_WR_UMR;
+	wr.npages = sg.length / sizeof(u64);
+	wr.page_shift = PAGE_SHIFT;
+	wr.mkey = mr->mmr.key;
+	wr.target.offset = start_page_index;
 
 	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	err = ib_post_send(umrc->qp, &wr, &bad);
+	err = ib_post_send(umrc->qp, &wr.wr, &bad);
 	if (err) {
 		mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
 	} else {
@@ -1122,16 +1123,17 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct umr_common *umrc = &dev->umrc;
 	struct mlx5_ib_umr_context umr_context;
-	struct ib_send_wr wr, *bad;
+	struct mlx5_umr_wr umrwr;
+	struct ib_send_wr *bad;
 	int err;
 
-	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)&umr_context;
-	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
+	memset(&umrwr.wr, 0, sizeof(umrwr));
+	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
 
 	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	err = ib_post_send(umrc->qp, &wr, &bad);
+	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 	if (err) {
 		up(&umrc->sem);
 		mlx5_ib_dbg(dev, "err %d\n", err);
@@ -1151,6 +1153,52 @@ error:
 	return err;
 }
 
+static int
+mlx5_alloc_priv_descs(struct ib_device *device,
+		      struct mlx5_ib_mr *mr,
+		      int ndescs,
+		      int desc_size)
+{
+	int size = ndescs * desc_size;
+	int add_size;
+	int ret;
+
+	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+
+	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
+	if (!mr->descs_alloc)
+		return -ENOMEM;
+
+	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
+
+	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
+				      size, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	return 0;
+err:
+	kfree(mr->descs_alloc);
+
+	return ret;
+}
+
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+{
+	if (mr->descs) {
+		struct ib_device *device = mr->ibmr.device;
+		int size = mr->max_descs * mr->desc_size;
+
+		dma_unmap_single(device->dma_device, mr->desc_map,
+				 size, DMA_TO_DEVICE);
+		kfree(mr->descs_alloc);
+		mr->descs = NULL;
+	}
+}
+
 static int clean_mr(struct mlx5_ib_mr *mr)
 {
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
@@ -1170,6 +1218,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
 		mr->sig = NULL;
 	}
 
+	mlx5_free_priv_descs(mr);
+
 	if (!umred) {
 		err = destroy_mkey(dev, mr);
 		if (err) {
@@ -1259,6 +1309,14 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 	if (mr_type == IB_MR_TYPE_MEM_REG) {
 		access_mode = MLX5_ACCESS_MODE_MTT;
 		in->seg.log2_page_size = PAGE_SHIFT;
+
+		err = mlx5_alloc_priv_descs(pd->device, mr,
+					    ndescs, sizeof(u64));
+		if (err)
+			goto err_free_in;
+
+		mr->desc_size = sizeof(u64);
+		mr->max_descs = ndescs;
 	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
 		u32 psv_index[2];
 
@@ -1315,6 +1373,7 @@ err_destroy_psv:
 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
 				     mr->sig->psv_wire.psv_idx);
 	}
+	mlx5_free_priv_descs(mr);
 err_free_sig:
 	kfree(mr->sig);
 err_free_in:
@@ -1324,48 +1383,6 @@ err_free:
 	return ERR_PTR(err);
 }
 
-struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
-							       int page_list_len)
-{
-	struct mlx5_ib_fast_reg_page_list *mfrpl;
-	int size = page_list_len * sizeof(u64);
-
-	mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
-	if (!mfrpl)
-		return ERR_PTR(-ENOMEM);
-
-	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
-	if (!mfrpl->ibfrpl.page_list)
-		goto err_free;
-
-	mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
-						     size, &mfrpl->map,
-						     GFP_KERNEL);
-	if (!mfrpl->mapped_page_list)
-		goto err_free;
-
-	WARN_ON(mfrpl->map & 0x3f);
-
-	return &mfrpl->ibfrpl;
-
-err_free:
-	kfree(mfrpl->ibfrpl.page_list);
-	kfree(mfrpl);
-	return ERR_PTR(-ENOMEM);
-}
-
-void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
-{
-	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
-	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
-	int size = page_list->max_page_list_len * sizeof(u64);
-
-	dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
-			  mfrpl->map);
-	kfree(mfrpl->ibfrpl.page_list);
-	kfree(mfrpl);
-}
-
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 			    struct ib_mr_status *mr_status)
 {
@@ -1406,3 +1423,39 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 done:
 	return ret;
 }
+
+static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
+{
+	struct mlx5_ib_mr *mr = to_mmr(ibmr);
+	__be64 *descs;
+
+	if (unlikely(mr->ndescs == mr->max_descs))
+		return -ENOMEM;
+
+	descs = mr->descs;
+	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+
+	return 0;
+}
+
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
+		      struct scatterlist *sg,
+		      int sg_nents)
+{
+	struct mlx5_ib_mr *mr = to_mmr(ibmr);
+	int n;
+
+	mr->ndescs = 0;
+
+	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
+				   mr->desc_size * mr->max_descs,
+				   DMA_TO_DEVICE);
+
+	n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+
+	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
+				      mr->desc_size * mr->max_descs,
+				      DMA_TO_DEVICE);
+
+	return n;
+}
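
For context, the mlx5_ib_map_mr_sg()/mlx5_set_page() pair added above backs the generic ib_map_mr_sg() registration path that replaces the removed fast_reg_page_list interface. The snippet below is a minimal, hypothetical consumer-side sketch, not part of this patch, of how an upper-layer protocol might drive that path for an already DMA-mapped scatterlist. The function name example_fast_reg() and the qp/pd/sg/sg_nents parameters are illustrative assumptions, the ib_map_mr_sg() prototype assumed is the (mr, sg, sg_nents, page_size) form of this kernel generation, and completion handling is elided.

/*
 * Hypothetical ULP-side sketch (not part of this patch): register a
 * DMA-mapped scatterlist through the new memory registration API that
 * mlx5_ib_map_mr_sg() implements for mlx5 devices.
 */
#include <rdma/ib_verbs.h>

static int example_fast_reg(struct ib_qp *qp, struct ib_pd *pd,
			    struct scatterlist *sg, int sg_nents)
{
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr reg_wr;
	struct ib_mr *mr;
	int n, err;

	/* Allocates the MR; for mlx5 this also allocates the private
	 * descriptor array via mlx5_alloc_priv_descs(). */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* The core hands the SG list to the driver's map_mr_sg hook,
	 * which for mlx5 walks it with ib_sg_to_pages()/mlx5_set_page(). */
	n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
	if (n < sg_nents) {
		err = n < 0 ? n : -EINVAL;
		goto out_dereg;
	}

	/* Post a registration work request instead of the old FRWR one. */
	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;

	err = ib_post_send(qp, &reg_wr.wr, &bad_wr);
	if (err)
		goto out_dereg;

	return 0;

out_dereg:
	ib_dereg_mr(mr);
	return err;
}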