Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c	60
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.h	16
2 files changed, 38 insertions(+), 38 deletions(-)
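
In short: this patch mechanically renames the FMR-specific identifiers in the SRP initiator (fmr_page_size, fmr_page_mask, fmr_max_size, max_pages_per_fmr, fmr_len, nfmr, SRP_FMR_SIZE) to memory-registration-generic names (mr_page_size, mr_page_mask, mr_max_size, max_pages_per_mr, dma_len, nmdesc, SRP_MAX_PAGES_PER_MR). No functional change is intended.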
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 80dfe173deac..c9b3b9e0679f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -302,8 +302,8 @@ static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
 	fmr_param.pool_size = target->scsi_host->can_queue;
 	fmr_param.dirty_watermark = fmr_param.pool_size / 4;
 	fmr_param.cache = 1;
-	fmr_param.max_pages_per_fmr = dev->max_pages_per_fmr;
-	fmr_param.page_shift = ilog2(dev->fmr_page_size);
+	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
+	fmr_param.page_shift = ilog2(dev->mr_page_size);
 	fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
 			    IB_ACCESS_REMOTE_WRITE |
 			    IB_ACCESS_REMOTE_READ);
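
Note that only the srp_device members on the right-hand side of the assignments change here; the struct ib_fmr_pool_param fields max_pages_per_fmr and page_shift belong to the FMR pool API itself and keep their names.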
@@ -657,7 +657,7 @@ static int srp_alloc_req_data(struct srp_target_port *target)
 		req = &target->req_ring[i];
 		req->fmr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
 					GFP_KERNEL);
-		req->map_page = kmalloc(srp_dev->max_pages_per_fmr *
+		req->map_page = kmalloc(srp_dev->max_pages_per_mr *
 					sizeof(void *), GFP_KERNEL);
 		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
 		if (!req->fmr_list || !req->map_page || !req->indirect_desc)
@@ -810,7 +810,7 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 		return;
 
 	pfmr = req->fmr_list;
-	while (req->nfmr--)
+	while (req->nmdesc--)
 		ib_fmr_pool_unmap(*pfmr++);
 
 	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
@@ -979,9 +979,9 @@ static int srp_map_finish_fmr(struct srp_map_state *state,
 		return PTR_ERR(fmr);
 
 	*state->next_fmr++ = fmr;
-	state->nfmr++;
+	state->nmdesc++;
 
-	srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);
+	srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
 
 	return 0;
 }
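
Each FMR mapping produced here yields exactly one rkey, i.e. one memory descriptor in the SRP request, so the counter is now named nmdesc (number of memory descriptors) rather than nfmr; presumably this keeps the name accurate for registration methods other than FMR as well.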
@@ -995,14 +995,14 @@ static int srp_finish_mapping(struct srp_map_state *state,
 		return 0;
 
 	if (state->npages == 1 && !register_always)
-		srp_map_desc(state, state->base_dma_addr, state->fmr_len,
+		srp_map_desc(state, state->base_dma_addr, state->dma_len,
 			     target->rkey);
 	else
 		ret = srp_map_finish_fmr(state, target);
 
 	if (ret == 0) {
 		state->npages = 0;
-		state->fmr_len = 0;
+		state->dma_len = 0;
 	}
 
 	return ret;
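
The state->npages == 1 && !register_always branch also shows why fmr_len was a misnomer: for a single-page transfer the driver skips registration entirely and describes the buffer with the target's global rkey, so the quantity being tracked is simply the DMA length of the descriptor under construction, hence dma_len.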
@@ -1047,7 +1047,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	 * that were never quite defined, but went away when the initiator
 	 * avoided using FMR on such page fragments.
 	 */
-	if (dma_addr & ~dev->fmr_page_mask || dma_len > dev->fmr_max_size) {
+	if (dma_addr & ~dev->mr_page_mask || dma_len > dev->mr_max_size) {
 		ret = srp_finish_mapping(state, target);
 		if (ret)
 			return ret;
@@ -1066,7 +1066,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 		srp_map_update_start(state, sg, sg_index, dma_addr);
 
 	while (dma_len) {
-		if (state->npages == dev->max_pages_per_fmr) {
+		if (state->npages == dev->max_pages_per_mr) {
 			ret = srp_finish_mapping(state, target);
 			if (ret)
 				return ret;
@@ -1074,12 +1074,12 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 			srp_map_update_start(state, sg, sg_index, dma_addr);
 		}
 
-		len = min_t(unsigned int, dma_len, dev->fmr_page_size);
+		len = min_t(unsigned int, dma_len, dev->mr_page_size);
 
 		if (!state->npages)
 			state->base_dma_addr = dma_addr;
 		state->pages[state->npages++] = dma_addr;
-		state->fmr_len += len;
+		state->dma_len += len;
 		dma_addr += len;
 		dma_len -= len;
 	}
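
The loop above chops one scatterlist entry into mr_page_size-sized pieces, recording the DMA address of each piece and accumulating the mapped length. A minimal userspace sketch of that bookkeeping, with a hypothetical page-aligned segment in place of a real scatterlist entry (MR_PAGE_SHIFT and the addresses are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

#define MR_PAGE_SHIFT 12                    /* assumed 4 KiB mr_page_size */
#define MR_PAGE_SIZE  (1u << MR_PAGE_SHIFT)

int main(void)
{
	/* Hypothetical page-aligned DMA segment, as guaranteed by the
	 * mr_page_mask check before this loop runs. */
	uint64_t dma_addr = 0x100000;
	unsigned int dma_len = 3 * MR_PAGE_SIZE + 512;

	uint64_t pages[16];                 /* stands in for state->pages[] */
	unsigned int npages = 0;            /* state->npages */
	unsigned int mapped = 0;            /* state->dma_len */

	while (dma_len) {
		unsigned int len = dma_len < MR_PAGE_SIZE ? dma_len : MR_PAGE_SIZE;

		pages[npages++] = dma_addr; /* one entry per MR page */
		mapped += len;
		dma_addr += len;
		dma_len -= len;
	}
	/* Prints: npages = 4, dma_len = 12800 */
	printf("npages = %u, dma_len = %u\n", npages, mapped);
	return 0;
}

A trailing piece shorter than a full page leaves len != mr_page_size, which is exactly the condition the next hunk uses to close out the current mapping.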
@@ -1089,7 +1089,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
 	 * boundries.
 	 */
 	ret = 0;
-	if (len != dev->fmr_page_size) {
+	if (len != dev->mr_page_size) {
 		ret = srp_finish_mapping(state, target);
 		if (!ret)
 			srp_map_update_start(state, NULL, 0, 0);
@@ -1136,7 +1136,7 @@ backtrack:
 	if (use_fmr == SRP_MAP_ALLOW_FMR && srp_finish_mapping(state, target))
 		goto backtrack;
 
-	req->nfmr = state->nfmr;
+	req->nmdesc = state->nmdesc;
 }
 
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
@@ -1189,7 +1189,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 
-		req->nfmr = 0;
+		req->nmdesc = 0;
 		goto map_complete;
 	}
 
@@ -1637,7 +1637,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 		/*
 		 * If we ran out of memory descriptors (-ENOMEM) because an
 		 * application is queuing many requests with more than
-		 * max_pages_per_fmr sg-list elements, tell the SCSI mid-layer
+		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
 		 * to reduce queue depth temporarily.
 		 */
 		scmnd->result = len == -ENOMEM ?
@@ -2878,8 +2878,8 @@ static void srp_add_one(struct ib_device *device)
 	struct srp_device *srp_dev;
 	struct ib_device_attr *dev_attr;
 	struct srp_host *host;
-	int fmr_page_shift, s, e, p;
-	u64 max_pages_per_fmr;
+	int mr_page_shift, s, e, p;
+	u64 max_pages_per_mr;
 
 	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
 	if (!dev_attr)
@@ -2902,18 +2902,18 @@ static void srp_add_one(struct ib_device *device)
 	 * minimum of 4096 bytes. We're unlikely to build large sglists
 	 * out of smaller entries.
 	 */
-	fmr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
-	srp_dev->fmr_page_size = 1 << fmr_page_shift;
-	srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1);
-	max_pages_per_fmr = dev_attr->max_mr_size;
-	do_div(max_pages_per_fmr, srp_dev->fmr_page_size);
-	srp_dev->max_pages_per_fmr = min_t(u64, SRP_FMR_SIZE,
-					   max_pages_per_fmr);
-	srp_dev->fmr_max_size = srp_dev->fmr_page_size *
-				srp_dev->max_pages_per_fmr;
-	pr_debug("%s: fmr_page_shift = %d, dev_attr->max_mr_size = %#llx, max_pages_per_fmr = %d, fmr_max_size = %#x\n",
-		 device->name, fmr_page_shift, dev_attr->max_mr_size,
-		 srp_dev->max_pages_per_fmr, srp_dev->fmr_max_size);
+	mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
+	srp_dev->mr_page_size = 1 << mr_page_shift;
+	srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
+	max_pages_per_mr = dev_attr->max_mr_size;
+	do_div(max_pages_per_mr, srp_dev->mr_page_size);
+	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
+					  max_pages_per_mr);
+	srp_dev->mr_max_size = srp_dev->mr_page_size *
+			       srp_dev->max_pages_per_mr;
+	pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, max_pages_per_mr = %d, mr_max_size = %#x\n",
+		 device->name, mr_page_shift, dev_attr->max_mr_size,
+		 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
 
 	INIT_LIST_HEAD(&srp_dev->dev_list);
 
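The srp_add_one() hunk above computes the registration limits from the HCA attributes: the smallest page size the device supports (clamped to a 4096-byte minimum), the corresponding mask, and a per-MR page count capped at SRP_MAX_PAGES_PER_MR. A standalone sketch of that arithmetic, assuming hypothetical dev_attr values and using userspace ffs() in place of the kernel helper (the capability numbers are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define SRP_MAX_PAGES_PER_MR 512

int main(void)
{
	uint32_t page_size_cap = 0x000ff000;	/* hypothetical: 4 KiB .. 2 MiB pages */
	uint64_t max_mr_size   = 1ULL << 32;	/* hypothetical: 4 GiB per MR */

	/* Smallest supported page size, clamped to a 4096-byte minimum. */
	int mr_page_shift = ffs((int)page_size_cap) - 1;
	if (mr_page_shift < 12)
		mr_page_shift = 12;

	uint64_t mr_page_size = 1ULL << mr_page_shift;
	uint64_t mr_page_mask = ~(mr_page_size - 1);

	uint64_t max_pages_per_mr = max_mr_size / mr_page_size;
	if (max_pages_per_mr > SRP_MAX_PAGES_PER_MR)
		max_pages_per_mr = SRP_MAX_PAGES_PER_MR;

	uint64_t mr_max_size = mr_page_size * max_pages_per_mr;

	/* With these caps: shift 12, mask ...f000, 512 pages, 2 MiB per MR. */
	printf("mr_page_shift=%d mr_page_mask=%#llx max_pages_per_mr=%llu mr_max_size=%#llx\n",
	       mr_page_shift, (unsigned long long)mr_page_mask,
	       (unsigned long long)max_pages_per_mr,
	       (unsigned long long)mr_max_size);
	return 0;
}

This mirrors the renamed kernel code line for line; the do_div() there is simply 64-bit division on the u64 max_pages_per_mr.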
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 2d99e52f2f5c..eb130486b1c8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -66,7 +66,7 @@ enum {
 	SRP_TAG_NO_REQ		= ~0U,
 	SRP_TAG_TSK_MGMT	= 1U << 31,
 
-	SRP_FMR_SIZE		= 512,
+	SRP_MAX_PAGES_PER_MR	= 512,
 
 	SRP_MAP_ALLOW_FMR	= 0,
 	SRP_MAP_NO_FMR		= 1,
@@ -88,10 +88,10 @@ struct srp_device {
 	struct ib_device       *dev;
 	struct ib_pd	       *pd;
 	struct ib_mr	       *mr;
-	u64			fmr_page_mask;
-	int			fmr_page_size;
-	int			fmr_max_size;
-	int			max_pages_per_fmr;
+	u64			mr_page_mask;
+	int			mr_page_size;
+	int			mr_max_size;
+	int			max_pages_per_mr;
 	bool			has_fmr;
 };
 
@@ -114,7 +114,7 @@ struct srp_request {
 	u64		       *map_page;
 	struct srp_direct_buf  *indirect_desc;
 	dma_addr_t		indirect_dma_addr;
-	short			nfmr;
+	short			nmdesc;
 	short			index;
 };
 
@@ -201,10 +201,10 @@ struct srp_map_state {
 	struct srp_direct_buf  *desc;
 	u64		       *pages;
 	dma_addr_t		base_dma_addr;
-	u32			fmr_len;
+	u32			dma_len;
 	u32			total_len;
 	unsigned int		npages;
-	unsigned int		nfmr;
+	unsigned int		nmdesc;
 	unsigned int		ndesc;
 	struct scatterlist     *unmapped_sg;
 	int			unmapped_index;