summary | refs | log | tree | commit | diff | stats
path: root/drivers/infiniband/ulp
diff options
context:
space:
mode:
author: Christoph Hellwig <hch@lst.de> 2016-09-05 06:56:19 -0400
committer: Doug Ledford <dledford@redhat.com> 2016-09-23 13:47:44 -0400
commit5f071777f9cbd71faa00eb854d15f42ae74e0471 (patch)
tree74a8a09b446f09b9e69e1fd0f5736a3d015b0002 /drivers/infiniband/ulp
parent8e61212d05963a3beecb8bf124b88a0b13a9600d (diff)
IB/srp: use IB_PD_UNSAFE_GLOBAL_RKEY
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c44
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h3
2 files changed, 20 insertions, 27 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 579b8aedfcdd..5ed1e8841a7b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1262,6 +1262,7 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
1262{ 1262{
1263 struct srp_target_port *target = ch->target; 1263 struct srp_target_port *target = ch->target;
1264 struct srp_device *dev = target->srp_host->srp_dev; 1264 struct srp_device *dev = target->srp_host->srp_dev;
1265 struct ib_pd *pd = target->pd;
1265 struct ib_pool_fmr *fmr; 1266 struct ib_pool_fmr *fmr;
1266 u64 io_addr = 0; 1267 u64 io_addr = 0;
1267 1268
@@ -1273,9 +1274,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
1273 if (state->npages == 0) 1274 if (state->npages == 0)
1274 return 0; 1275 return 0;
1275 1276
1276 if (state->npages == 1 && target->global_mr) { 1277 if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
1277 srp_map_desc(state, state->base_dma_addr, state->dma_len, 1278 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1278 target->global_mr->rkey); 1279 pd->unsafe_global_rkey);
1279 goto reset_state; 1280 goto reset_state;
1280 } 1281 }
1281 1282
@@ -1315,6 +1316,7 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1315{ 1316{
1316 struct srp_target_port *target = ch->target; 1317 struct srp_target_port *target = ch->target;
1317 struct srp_device *dev = target->srp_host->srp_dev; 1318 struct srp_device *dev = target->srp_host->srp_dev;
1319 struct ib_pd *pd = target->pd;
1318 struct ib_send_wr *bad_wr; 1320 struct ib_send_wr *bad_wr;
1319 struct ib_reg_wr wr; 1321 struct ib_reg_wr wr;
1320 struct srp_fr_desc *desc; 1322 struct srp_fr_desc *desc;
@@ -1326,12 +1328,12 @@ static int srp_map_finish_fr(struct srp_map_state *state,
1326 1328
1327 WARN_ON_ONCE(!dev->use_fast_reg); 1329 WARN_ON_ONCE(!dev->use_fast_reg);
1328 1330
1329 if (sg_nents == 1 && target->global_mr) { 1331 if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
1330 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; 1332 unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
1331 1333
1332 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset, 1334 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
1333 sg_dma_len(state->sg) - sg_offset, 1335 sg_dma_len(state->sg) - sg_offset,
1334 target->global_mr->rkey); 1336 pd->unsafe_global_rkey);
1335 if (sg_offset_p) 1337 if (sg_offset_p)
1336 *sg_offset_p = 0; 1338 *sg_offset_p = 0;
1337 return 1; 1339 return 1;
@@ -1491,7 +1493,7 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
1491 for_each_sg(scat, sg, count, i) { 1493 for_each_sg(scat, sg, count, i) {
1492 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), 1494 srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
1493 ib_sg_dma_len(dev->dev, sg), 1495 ib_sg_dma_len(dev->dev, sg),
1494 target->global_mr->rkey); 1496 target->pd->unsafe_global_rkey);
1495 } 1497 }
1496 1498
1497 return 0; 1499 return 0;
@@ -1591,6 +1593,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1591 struct srp_request *req) 1593 struct srp_request *req)
1592{ 1594{
1593 struct srp_target_port *target = ch->target; 1595 struct srp_target_port *target = ch->target;
1596 struct ib_pd *pd = target->pd;
1594 struct scatterlist *scat; 1597 struct scatterlist *scat;
1595 struct srp_cmd *cmd = req->cmd->buf; 1598 struct srp_cmd *cmd = req->cmd->buf;
1596 int len, nents, count, ret; 1599 int len, nents, count, ret;
@@ -1626,7 +1629,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1626 fmt = SRP_DATA_DESC_DIRECT; 1629 fmt = SRP_DATA_DESC_DIRECT;
1627 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); 1630 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1628 1631
1629 if (count == 1 && target->global_mr) { 1632 if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
1630 /* 1633 /*
1631 * The midlayer only generated a single gather/scatter 1634 * The midlayer only generated a single gather/scatter
1632 * entry, or DMA mapping coalesced everything to a 1635 * entry, or DMA mapping coalesced everything to a
@@ -1636,7 +1639,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1636 struct srp_direct_buf *buf = (void *) cmd->add_data; 1639 struct srp_direct_buf *buf = (void *) cmd->add_data;
1637 1640
1638 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 1641 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1639 buf->key = cpu_to_be32(target->global_mr->rkey); 1642 buf->key = cpu_to_be32(pd->unsafe_global_rkey);
1640 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 1643 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1641 1644
1642 req->nmdesc = 0; 1645 req->nmdesc = 0;
@@ -1709,14 +1712,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1709 memcpy(indirect_hdr->desc_list, req->indirect_desc, 1712 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1710 count * sizeof (struct srp_direct_buf)); 1713 count * sizeof (struct srp_direct_buf));
1711 1714
1712 if (!target->global_mr) { 1715 if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
1713 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end, 1716 ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
1714 idb_len, &idb_rkey); 1717 idb_len, &idb_rkey);
1715 if (ret < 0) 1718 if (ret < 0)
1716 goto unmap; 1719 goto unmap;
1717 req->nmdesc++; 1720 req->nmdesc++;
1718 } else { 1721 } else {
1719 idb_rkey = cpu_to_be32(target->global_mr->rkey); 1722 idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
1720 } 1723 }
1721 1724
1722 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); 1725 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
@@ -3268,8 +3271,8 @@ static ssize_t srp_create_target(struct device *dev,
3268 target->io_class = SRP_REV16A_IB_IO_CLASS; 3271 target->io_class = SRP_REV16A_IB_IO_CLASS;
3269 target->scsi_host = target_host; 3272 target->scsi_host = target_host;
3270 target->srp_host = host; 3273 target->srp_host = host;
3274 target->pd = host->srp_dev->pd;
3271 target->lkey = host->srp_dev->pd->local_dma_lkey; 3275 target->lkey = host->srp_dev->pd->local_dma_lkey;
3272 target->global_mr = host->srp_dev->global_mr;
3273 target->cmd_sg_cnt = cmd_sg_entries; 3276 target->cmd_sg_cnt = cmd_sg_entries;
3274 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; 3277 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3275 target->allow_ext_sg = allow_ext_sg; 3278 target->allow_ext_sg = allow_ext_sg;
@@ -3524,6 +3527,7 @@ static void srp_add_one(struct ib_device *device)
3524 struct srp_host *host; 3527 struct srp_host *host;
3525 int mr_page_shift, p; 3528 int mr_page_shift, p;
3526 u64 max_pages_per_mr; 3529 u64 max_pages_per_mr;
3530 unsigned int flags = 0;
3527 3531
3528 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); 3532 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3529 if (!srp_dev) 3533 if (!srp_dev)
@@ -3558,6 +3562,10 @@ static void srp_add_one(struct ib_device *device)
3558 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr; 3562 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3559 } 3563 }
3560 3564
3565 if (never_register || !register_always ||
3566 (!srp_dev->has_fmr && !srp_dev->has_fr))
3567 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3568
3561 if (srp_dev->use_fast_reg) { 3569 if (srp_dev->use_fast_reg) {
3562 srp_dev->max_pages_per_mr = 3570 srp_dev->max_pages_per_mr =
3563 min_t(u32, srp_dev->max_pages_per_mr, 3571 min_t(u32, srp_dev->max_pages_per_mr,
@@ -3573,19 +3581,10 @@ static void srp_add_one(struct ib_device *device)
3573 INIT_LIST_HEAD(&srp_dev->dev_list); 3581 INIT_LIST_HEAD(&srp_dev->dev_list);
3574 3582
3575 srp_dev->dev = device; 3583 srp_dev->dev = device;
3576 srp_dev->pd = ib_alloc_pd(device, 0); 3584 srp_dev->pd = ib_alloc_pd(device, flags);
3577 if (IS_ERR(srp_dev->pd)) 3585 if (IS_ERR(srp_dev->pd))
3578 goto free_dev; 3586 goto free_dev;
3579 3587
3580 if (never_register || !register_always ||
3581 (!srp_dev->has_fmr && !srp_dev->has_fr)) {
3582 srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
3583 IB_ACCESS_LOCAL_WRITE |
3584 IB_ACCESS_REMOTE_READ |
3585 IB_ACCESS_REMOTE_WRITE);
3586 if (IS_ERR(srp_dev->global_mr))
3587 goto err_pd;
3588 }
3589 3588
3590 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { 3589 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3591 host = srp_add_port(srp_dev, p); 3590 host = srp_add_port(srp_dev, p);
@@ -3596,9 +3595,6 @@ static void srp_add_one(struct ib_device *device)
3596 ib_set_client_data(device, &srp_client, srp_dev); 3595 ib_set_client_data(device, &srp_client, srp_dev);
3597 return; 3596 return;
3598 3597
3599err_pd:
3600 ib_dealloc_pd(srp_dev->pd);
3601
3602free_dev: 3598free_dev:
3603 kfree(srp_dev); 3599 kfree(srp_dev);
3604} 3600}
@@ -3638,8 +3634,6 @@ static void srp_remove_one(struct ib_device *device, void *client_data)
3638 kfree(host); 3634 kfree(host);
3639 } 3635 }
3640 3636
3641 if (srp_dev->global_mr)
3642 ib_dereg_mr(srp_dev->global_mr);
3643 ib_dealloc_pd(srp_dev->pd); 3637 ib_dealloc_pd(srp_dev->pd);
3644 3638
3645 kfree(srp_dev); 3639 kfree(srp_dev);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 26bb9b0a7a63..21c69695f9d4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -90,7 +90,6 @@ struct srp_device {
90 struct list_head dev_list; 90 struct list_head dev_list;
91 struct ib_device *dev; 91 struct ib_device *dev;
92 struct ib_pd *pd; 92 struct ib_pd *pd;
93 struct ib_mr *global_mr;
94 u64 mr_page_mask; 93 u64 mr_page_mask;
95 int mr_page_size; 94 int mr_page_size;
96 int mr_max_size; 95 int mr_max_size;
@@ -179,7 +178,7 @@ struct srp_target_port {
179 spinlock_t lock; 178 spinlock_t lock;
180 179
181 /* read only in the hot path */ 180 /* read only in the hot path */
182 struct ib_mr *global_mr; 181 struct ib_pd *pd;
183 struct srp_rdma_ch *ch; 182 struct srp_rdma_ch *ch;
184 u32 ch_count; 183 u32 ch_count;
185 u32 lkey; 184 u32 lkey;