summary | refs | log | tree | commit | diff | stats
path: root/drivers/infiniband/ulp
diff options
context:
space:
mode:
author: Bart Van Assche <bvanassche@acm.org> 2019-01-25 13:34:50 -0500
committer: Martin K. Petersen <martin.petersen@oracle.com> 2019-02-04 21:31:37 -0500
commit: fd1b6687091450b0ca3303c97f45ecd87401c3d7 (patch)
tree: fd501bf5ed6adf087faf2ffa22fbf1a949f569cf /drivers/infiniband/ulp
parent: 337ec69ed761ae02a5d1226d607c487fedbd8dd7 (diff)
scsi: RDMA/srpt: Rework I/O context allocation
Instead of maintaining a list of free I/O contexts, use an sbitmap data structure to track which I/O contexts are in use and which are free. This makes the ib_srpt driver more consistent with other LIO drivers.

Cc: Doug Ledford <dledford@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Nicholas Bellinger <nab@linux-iscsi.org>
Cc: Mike Christie <mchristi@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c  47
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.h  4
2 files changed, 19 insertions, 32 deletions
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 0dc2df925f76..d426e01905f9 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1217,22 +1217,15 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
1217static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) 1217static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1218{ 1218{
1219 struct srpt_send_ioctx *ioctx; 1219 struct srpt_send_ioctx *ioctx;
1220 unsigned long flags; 1220 int tag, cpu;
1221 1221
1222 BUG_ON(!ch); 1222 BUG_ON(!ch);
1223 1223
1224 ioctx = NULL; 1224 tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
1225 spin_lock_irqsave(&ch->spinlock, flags); 1225 if (tag < 0)
1226 if (!list_empty(&ch->free_list)) { 1226 return NULL;
1227 ioctx = list_first_entry(&ch->free_list,
1228 struct srpt_send_ioctx, free_list);
1229 list_del(&ioctx->free_list);
1230 }
1231 spin_unlock_irqrestore(&ch->spinlock, flags);
1232
1233 if (!ioctx)
1234 return ioctx;
1235 1227
1228 ioctx = ch->ioctx_ring[tag];
1236 BUG_ON(ioctx->ch != ch); 1229 BUG_ON(ioctx->ch != ch);
1237 ioctx->state = SRPT_STATE_NEW; 1230 ioctx->state = SRPT_STATE_NEW;
1238 WARN_ON_ONCE(ioctx->recv_ioctx); 1231 WARN_ON_ONCE(ioctx->recv_ioctx);
@@ -1245,6 +1238,8 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
1245 */ 1238 */
1246 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd)); 1239 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1247 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data)); 1240 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1241 ioctx->cmd.map_tag = tag;
1242 ioctx->cmd.map_cpu = cpu;
1248 1243
1249 return ioctx; 1244 return ioctx;
1250} 1245}
@@ -2148,7 +2143,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
2148 struct srpt_rdma_ch *ch = NULL; 2143 struct srpt_rdma_ch *ch = NULL;
2149 char i_port_id[36]; 2144 char i_port_id[36];
2150 u32 it_iu_len; 2145 u32 it_iu_len;
2151 int i, ret; 2146 int i, tag_num, tag_size, ret;
2152 2147
2153 WARN_ON_ONCE(irqs_disabled()); 2148 WARN_ON_ONCE(irqs_disabled());
2154 2149
@@ -2248,11 +2243,8 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
2248 goto free_rsp_cache; 2243 goto free_rsp_cache;
2249 } 2244 }
2250 2245
2251 INIT_LIST_HEAD(&ch->free_list); 2246 for (i = 0; i < ch->rq_size; i++)
2252 for (i = 0; i < ch->rq_size; i++) {
2253 ch->ioctx_ring[i]->ch = ch; 2247 ch->ioctx_ring[i]->ch = ch;
2254 list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
2255 }
2256 if (!sdev->use_srq) { 2248 if (!sdev->use_srq) {
2257 u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ? 2249 u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
2258 be16_to_cpu(req->imm_data_offset) : 0; 2250 be16_to_cpu(req->imm_data_offset) : 0;
@@ -2306,18 +2298,20 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
2306 2298
2307 pr_debug("registering session %s\n", ch->sess_name); 2299 pr_debug("registering session %s\n", ch->sess_name);
2308 2300
2301 tag_num = ch->rq_size;
2302 tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
2309 if (sport->port_guid_tpg.se_tpg_wwn) 2303 if (sport->port_guid_tpg.se_tpg_wwn)
2310 ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0, 2304 ch->sess = target_setup_session(&sport->port_guid_tpg, tag_num,
2311 TARGET_PROT_NORMAL, 2305 tag_size, TARGET_PROT_NORMAL,
2312 ch->sess_name, ch, NULL); 2306 ch->sess_name, ch, NULL);
2313 if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess)) 2307 if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2314 ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0, 2308 ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
2315 TARGET_PROT_NORMAL, i_port_id, ch, 2309 tag_size, TARGET_PROT_NORMAL, i_port_id,
2316 NULL); 2310 ch, NULL);
2317 /* Retry without leading "0x" */ 2311 /* Retry without leading "0x" */
2318 if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess)) 2312 if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
2319 ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0, 2313 ch->sess = target_setup_session(&sport->port_gid_tpg, tag_num,
2320 TARGET_PROT_NORMAL, 2314 tag_size, TARGET_PROT_NORMAL,
2321 i_port_id + 2, ch, NULL); 2315 i_port_id + 2, ch, NULL);
2322 if (IS_ERR_OR_NULL(ch->sess)) { 2316 if (IS_ERR_OR_NULL(ch->sess)) {
2323 WARN_ON_ONCE(ch->sess == NULL); 2317 WARN_ON_ONCE(ch->sess == NULL);
@@ -3279,7 +3273,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
3279 struct srpt_send_ioctx, cmd); 3273 struct srpt_send_ioctx, cmd);
3280 struct srpt_rdma_ch *ch = ioctx->ch; 3274 struct srpt_rdma_ch *ch = ioctx->ch;
3281 struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx; 3275 struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
3282 unsigned long flags;
3283 3276
3284 WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE && 3277 WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
3285 !(ioctx->cmd.transport_state & CMD_T_ABORTED)); 3278 !(ioctx->cmd.transport_state & CMD_T_ABORTED));
@@ -3295,9 +3288,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
3295 ioctx->n_rw_ctx = 0; 3288 ioctx->n_rw_ctx = 0;
3296 } 3289 }
3297 3290
3298 spin_lock_irqsave(&ch->spinlock, flags); 3291 target_free_tag(se_cmd->se_sess, se_cmd);
3299 list_add(&ioctx->free_list, &ch->free_list);
3300 spin_unlock_irqrestore(&ch->spinlock, flags);
3301} 3292}
3302 3293
3303/** 3294/**
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index 39b3e50baf3d..ee9f20e9177a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -207,7 +207,6 @@ struct srpt_rw_ctx {
207 * @rw_ctxs: RDMA read/write contexts. 207 * @rw_ctxs: RDMA read/write contexts.
208 * @imm_sg: Scatterlist for immediate data. 208 * @imm_sg: Scatterlist for immediate data.
209 * @rdma_cqe: RDMA completion queue element. 209 * @rdma_cqe: RDMA completion queue element.
210 * @free_list: Node in srpt_rdma_ch.free_list.
211 * @state: I/O context state. 210 * @state: I/O context state.
212 * @cmd: Target core command data structure. 211 * @cmd: Target core command data structure.
213 * @sense_data: SCSI sense data. 212 * @sense_data: SCSI sense data.
@@ -227,7 +226,6 @@ struct srpt_send_ioctx {
227 struct scatterlist imm_sg; 226 struct scatterlist imm_sg;
228 227
229 struct ib_cqe rdma_cqe; 228 struct ib_cqe rdma_cqe;
230 struct list_head free_list;
231 enum srpt_command_state state; 229 enum srpt_command_state state;
232 struct se_cmd cmd; 230 struct se_cmd cmd;
233 u8 n_rdma; 231 u8 n_rdma;
@@ -277,7 +275,6 @@ enum rdma_ch_state {
277 * @req_lim_delta: Number of credits not yet sent back to the initiator. 275 * @req_lim_delta: Number of credits not yet sent back to the initiator.
278 * @imm_data_offset: Offset from start of SRP_CMD for immediate data. 276 * @imm_data_offset: Offset from start of SRP_CMD for immediate data.
279 * @spinlock: Protects free_list and state. 277 * @spinlock: Protects free_list and state.
280 * @free_list: Head of list with free send I/O contexts.
281 * @state: channel state. See also enum rdma_ch_state. 278 * @state: channel state. See also enum rdma_ch_state.
282 * @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel. 279 * @using_rdma_cm: Whether the RDMA/CM or IB/CM is used for this channel.
283 * @processing_wait_list: Whether or not cmd_wait_list is being processed. 280 * @processing_wait_list: Whether or not cmd_wait_list is being processed.
@@ -318,7 +315,6 @@ struct srpt_rdma_ch {
318 atomic_t req_lim_delta; 315 atomic_t req_lim_delta;
319 u16 imm_data_offset; 316 u16 imm_data_offset;
320 spinlock_t spinlock; 317 spinlock_t spinlock;
321 struct list_head free_list;
322 enum rdma_ch_state state; 318 enum rdma_ch_state state;
323 struct kmem_cache *rsp_buf_cache; 319 struct kmem_cache *rsp_buf_cache;
324 struct srpt_send_ioctx **ioctx_ring; 320 struct srpt_send_ioctx **ioctx_ring;