author     James Smart <jsmart2021@gmail.com>  2017-11-29 19:47:32 -0500
committer  Christoph Hellwig <hch@lst.de>      2018-01-08 05:01:54 -0500
commit     24431d60d3fbfd4c8c05e1828e5d9b35db4fd81c (patch)
tree       8524825c4e27854a09e34f66987f0bcf0f27c378
parent     6fda20283e55b9d288cd56822ce39fc8e64f2208 (diff)
nvme_fcloop: rework to remove xxx_IN_ISR feature flags
The existing fcloop driver expects the target side upcalls to the transport to context switch, thus the calls into the nvmet layer are not done in the calling context of the host/initiator down calls. The xxx_IN_ISR feature flags are used to select this logic.

The xxx_IN_ISR feature flags should go away in the nvmet_fc transport, as no other lldd utilizes them. Both the Broadcom and Cavium lldds have their own non-ISR deferred handlers, thus the nvmet calls can be made directly.

This patch converts the paths that make the target upcalls (command receive, abort receive) so that they schedule a work item rather than expecting the transport to schedule the work item.

The patch also cleans up the following:
- The completion path from target to host scheduled a host work element called "work". Rename it "tio_done_work" for code clarity.
- The abort io path scheduled an iniwork item to call the host side io done. This is no longer needed, as the abort routine can make the same call directly.

Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
-rw-r--r--  drivers/nvme/target/fcloop.c  98
1 file changed, 63 insertions(+), 35 deletions(-)
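The heart of the rework is the standard Linux deferred-work idiom: embed a struct work_struct in the request, bind it to a handler with INIT_WORK(), queue it with schedule_work(), and recover the enclosing request in the handler via container_of(). A minimal sketch of that idiom, for readers unfamiliar with it; struct my_req and the function names are hypothetical stand-ins for fcloop_fcpreq and its handlers, not code from the patch:

#include <linux/workqueue.h>

/* Hypothetical request embedding its own deferred-work item,
 * standing in for struct fcloop_fcpreq. */
struct my_req {
	int status;
	struct work_struct rcv_work;
};

/* Runs later in workqueue (process) context rather than in the
 * caller's context, so the nvmet upcall no longer relies on the
 * transport's xxx_IN_ISR deferral. */
static void my_req_rcv_work(struct work_struct *work)
{
	/* Recover the enclosing request from the embedded work item. */
	struct my_req *req = container_of(work, struct my_req, rcv_work);

	req->status = 0;	/* ... make the upcall here ... */
}

/* Submission path: bind the handler once, then queue the work.
 * schedule_work() returns false if the item is already queued,
 * which is why the patch wraps the abort case in WARN_ON(). */
static int my_req_submit(struct my_req *req)
{
	INIT_WORK(&req->rcv_work, my_req_rcv_work);
	schedule_work(&req->rcv_work);
	return 0;
}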
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c0080f6ab2f5..c5015199c031 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -249,14 +249,15 @@ struct fcloop_fcpreq {
 	u16			status;
 	bool			active;
 	bool			aborted;
-	struct work_struct	work;
+	struct work_struct	fcp_rcv_work;
+	struct work_struct	abort_rcv_work;
+	struct work_struct	tio_done_work;
 	struct nvmefc_tgt_fcp_req tgt_fcp_req;
 };
 
 struct fcloop_ini_fcpreq {
 	struct nvmefc_fcp_req	*fcpreq;
 	struct fcloop_fcpreq	*tfcp_req;
-	struct work_struct	iniwork;
 };
 
 static inline struct fcloop_lsreq *
@@ -347,17 +348,58 @@ fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
 	return 0;
 }
 
-/*
- * FCP IO operation done by initiator abort.
- * call back up initiator "done" flows.
- */
 static void
-fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
+fcloop_fcp_recv_work(struct work_struct *work)
+{
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
+	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+	struct fcloop_ini_fcpreq *inireq = NULL;
+	int ret = 0;
+
+	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
+				&tfcp_req->tgt_fcp_req,
+				fcpreq->cmdaddr, fcpreq->cmdlen);
+	if (ret) {
+		inireq = fcpreq->private;
+		inireq->tfcp_req = NULL;
+
+		fcpreq->status = tfcp_req->status;
+		fcpreq->done(fcpreq);
+	}
+}
+
+static void
+fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
+			struct fcloop_fcpreq *tfcp_req, int status)
+{
+	struct fcloop_ini_fcpreq *inireq = NULL;
+
+	if (fcpreq) {
+		inireq = fcpreq->private;
+		inireq->tfcp_req = NULL;
+
+		fcpreq->status = status;
+		fcpreq->done(fcpreq);
+	}
+}
+
+static void
+fcloop_fcp_abort_recv_work(struct work_struct *work)
 {
-	struct fcloop_ini_fcpreq *inireq =
-		container_of(work, struct fcloop_ini_fcpreq, iniwork);
+	struct fcloop_fcpreq *tfcp_req =
+		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
+	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
+
+	if (tfcp_req->tport->targetport)
+		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
+					&tfcp_req->tgt_fcp_req);
+
+	spin_lock(&tfcp_req->reqlock);
+	tfcp_req->fcpreq = NULL;
+	spin_unlock(&tfcp_req->reqlock);
 
-	inireq->fcpreq->done(inireq->fcpreq);
+	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
 }
 
 /*
@@ -368,8 +410,7 @@ static void
 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
 {
 	struct fcloop_fcpreq *tfcp_req =
-		container_of(work, struct fcloop_fcpreq, work);
-	struct fcloop_tport *tport = tfcp_req->tport;
+		container_of(work, struct fcloop_fcpreq, tio_done_work);
 	struct nvmefc_fcp_req *fcpreq;
 
 	spin_lock(&tfcp_req->reqlock);
@@ -377,10 +418,7 @@ fcloop_tgt_fcprqst_done_work(struct work_struct *work)
 	tfcp_req->fcpreq = NULL;
 	spin_unlock(&tfcp_req->reqlock);
 
-	if (tport->remoteport && fcpreq) {
-		fcpreq->status = tfcp_req->status;
-		fcpreq->done(fcpreq);
-	}
+	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
 
 	kfree(tfcp_req);
 }
@@ -395,7 +433,6 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 	struct fcloop_rport *rport = remoteport->private;
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
 	struct fcloop_fcpreq *tfcp_req;
-	int ret = 0;
 
 	if (!rport->targetport)
 		return -ECONNREFUSED;
@@ -406,16 +443,16 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport,
 
 	inireq->fcpreq = fcpreq;
 	inireq->tfcp_req = tfcp_req;
-	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
 	tfcp_req->fcpreq = fcpreq;
 	tfcp_req->tport = rport->targetport->private;
 	spin_lock_init(&tfcp_req->reqlock);
-	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
+	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
+	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
+	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
 
-	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
-				fcpreq->cmdaddr, fcpreq->cmdlen);
+	schedule_work(&tfcp_req->fcp_rcv_work);
 
-	return ret;
+	return 0;
 }
 
 static void
@@ -594,7 +631,7 @@ fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
 {
 	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
 
-	schedule_work(&tfcp_req->work);
+	schedule_work(&tfcp_req->tio_done_work);
 }
 
 static void
@@ -610,13 +647,12 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 			void *hw_queue_handle,
 			struct nvmefc_fcp_req *fcpreq)
 {
-	struct fcloop_rport *rport = remoteport->private;
 	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
 	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
 
 	if (!tfcp_req)
 		/* abort has already been called */
-		goto finish;
+		return;
 
 	/* break initiator/target relationship for io */
 	spin_lock(&tfcp_req->reqlock);
@@ -624,14 +660,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
 	tfcp_req->fcpreq = NULL;
 	spin_unlock(&tfcp_req->reqlock);
 
-	if (rport->targetport)
-		nvmet_fc_rcv_fcp_abort(rport->targetport,
-				&tfcp_req->tgt_fcp_req);
-
-finish:
-	/* post the aborted io completion */
-	fcpreq->status = -ECANCELED;
-	schedule_work(&inireq->iniwork);
+	WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
 }
 
 static void
@@ -721,8 +750,7 @@ static struct nvmet_fc_target_template tgttemplate = {
 	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
 	.dma_boundary		= FCLOOP_DMABOUND_4G,
 	/* optional features */
-	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
-				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+	.target_features	= 0,
 	/* sizes of additional private data for data structures */
 	.target_priv_sz		= sizeof(struct fcloop_tport),
 };