| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2017-03-24 17:37:12 -0400 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-03-24 17:37:12 -0400 |
| commit    | 04e904aa79ccdeede66d58e72e4c5402e4b9bd70       |                           |
| tree      | 16cb8b11b9a6b6efda065e55593a797e6bdec77d       |                           |
| parent    | 59d9cb91d07236b5255a3b1999f6f2d9985b3657       |                           |
| parent    | 95a49603707d982b25d17c5b70e220a05556a2f9       |                           |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"A few fixes for the current series that should go into -rc4. This
contains:
- a fix for a potential corruption of un-started requests from Ming.
- a blk-stat fix from Omar, ensuring we flush the stat batch before
checking nr_samples.
- a set of fixes from Sagi for the nvmeof family"
* 'for-linus' of git://git.kernel.dk/linux-block:
blk-mq: don't complete un-started request in timeout handler
nvme-loop: handle cpu unplug when re-establishing the controller
nvme-rdma: handle cpu unplug when re-establishing the controller
nvmet-rdma: Fix a possible uninitialized variable dereference
nvmet: confirm sq percpu has scheduled and switched to atomic
nvme-loop: fix a possible use-after-free when destroying the admin queue
blk-stat: fix blk_stat_sum() if all samples are batched
| mode       | file                        | lines changed |
|------------|-----------------------------|---------------|
| -rw-r--r-- | block/blk-mq.c              | 11 |
| -rw-r--r-- | block/blk-stat.c            | 4  |
| -rw-r--r-- | drivers/nvme/host/rdma.c    | 28 |
| -rw-r--r-- | drivers/nvme/target/core.c  | 11 |
| -rw-r--r-- | drivers/nvme/target/loop.c  | 90 |
| -rw-r--r-- | drivers/nvme/target/nvmet.h | 1  |
| -rw-r--r-- | drivers/nvme/target/rdma.c  | 8  |

7 files changed, 82 insertions(+), 71 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a4546f060e80..08a49c69738b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-		/*
-		 * If a request wasn't started before the queue was
-		 * marked dying, kill it here or it'll go unnoticed.
-		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_end_request(rq, rq->errors);
-		}
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
 		return;
-	}
 
 	if (time_after_eq(jiffies, rq->deadline)) {
 		if (!blk_mark_rq_complete(rq))
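
As the pull message notes, the blk-mq change stops the timeout handler from completing un-started requests: a request without REQ_ATOM_STARTED is still owned by the submission path, so ending it from the timeout scan can corrupt it. The scan now simply skips such requests. A minimal user-space model of the scan after the fix; `struct req` and `scan_expired()` are illustrative stand-ins, not kernel types:

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct req {
	bool started;		/* models REQ_ATOM_STARTED */
	time_t deadline;	/* only meaningful once the request is started */
	const char *tag;
};

/* Timeout scan: requests that were never started are skipped outright. */
static void scan_expired(struct req *rqs, int n, time_t now)
{
	for (int i = 0; i < n; i++) {
		struct req *rq = &rqs[i];

		if (!rq->started)
			continue;	/* still owned by the submitter; don't touch it */

		if (now >= rq->deadline)
			printf("%s timed out\n", rq->tag);
	}
}

int main(void)
{
	time_t now = time(NULL);
	struct req rqs[] = {
		{ .started = true,  .deadline = now - 5,  .tag = "rq0" },
		{ .started = false, .deadline = 0,        .tag = "rq1" },	/* skipped */
		{ .started = true,  .deadline = now + 60, .tag = "rq2" },
	};

	scan_expired(rqs, 3, now);
	return 0;
}
```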
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 9b43efb8933f..186fcb981e9b 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat)
 
 static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 {
+	blk_stat_flush_batch(src);
+
 	if (!src->nr_samples)
 		return;
 
-	blk_stat_flush_batch(src);
-
 	dst->min = min(dst->min, src->min);
 	dst->max = max(dst->max, src->max);
 
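
The blk-stat fix is purely an ordering issue: samples can sit in a per-stat batch and are only folded into `nr_samples` by `blk_stat_flush_batch()`. If `blk_stat_sum()` checks `nr_samples` before flushing, a source whose samples are all still batched looks empty and gets dropped. A small self-contained model of that ordering (the struct is a simplified stand-in for `struct blk_rq_stat`, tracking only a count and a sum):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct blk_rq_stat: a running total plus a batch. */
struct stat {
	uint64_t nr_samples;
	uint64_t sum;
	uint64_t nr_batch;
	uint64_t batch_sum;
};

static void flush_batch(struct stat *s)
{
	s->nr_samples += s->nr_batch;
	s->sum += s->batch_sum;
	s->nr_batch = s->batch_sum = 0;
}

static void stat_sum(struct stat *dst, struct stat *src)
{
	flush_batch(src);	/* must happen before the emptiness check */

	if (!src->nr_samples)
		return;		/* with the old order, batched-only sources bailed out here */

	dst->nr_samples += src->nr_samples;
	dst->sum += src->sum;
}

int main(void)
{
	struct stat dst = { 0 };
	struct stat src = { .nr_batch = 3, .batch_sum = 30 };	/* all samples still batched */

	stat_sum(&dst, &src);
	/* Prints samples=3 sum=30; with the old ordering both were 0. */
	printf("samples=%llu sum=%llu\n",
	       (unsigned long long)dst.nr_samples,
	       (unsigned long long)dst.sum);
	return 0;
}
```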
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 779f516e7a4e..47a479f26e5d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl,
 	struct ib_device *ibdev = dev->dev;
 	int ret;
 
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
 			DMA_TO_DEVICE);
 	if (ret)
@@ -652,8 +650,22 @@ out_free_queues:
 
 static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
 	int i, ret;
 
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret)
+		return ret;
+
+	ctrl->queue_count = nr_io_queues + 1;
+	if (ctrl->queue_count < 2)
+		return 0;
+
+	dev_info(ctrl->ctrl.device,
+		"creating %d I/O queues.\n", nr_io_queues);
+
 	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvme_rdma_init_queue(ctrl, i,
 				ctrl->ctrl.opts->queue_size);
@@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 
 static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret)
-		return ret;
-
-	ctrl->queue_count = opts->nr_io_queues + 1;
-	if (ctrl->queue_count < 2)
-		return 0;
-
-	dev_info(ctrl->ctrl.device,
-		"creating %d I/O queues.\n", opts->nr_io_queues);
-
 	ret = nvme_rdma_init_io_queues(ctrl);
 	if (ret)
 		return ret;
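
The nvme-rdma change moves queue-count negotiation from `nvme_rdma_create_io_queues()` into `nvme_rdma_init_io_queues()`, which also runs when the controller is re-established, and clamps the requested count to `num_online_cpus()` so a reconnect after a CPU unplug does not ask for queues that can no longer be served. The shape of the calculation as a hedged stand-alone sketch; `negotiate_queue_count()` is an illustrative stand-in for `nvme_set_queue_count()`:

```c
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Stand-in for the controller negotiation: it may grant fewer queues than asked. */
static int negotiate_queue_count(unsigned int *nr_io_queues)
{
	unsigned int granted = 4;	/* pretend the controller supports at most 4 */

	*nr_io_queues = min(*nr_io_queues, granted);
	return 0;
}

static int init_io_queues(unsigned int requested, unsigned int online_cpus,
			  unsigned int *queue_count)
{
	unsigned int nr_io_queues = min(requested, online_cpus);
	int ret = negotiate_queue_count(&nr_io_queues);

	if (ret)
		return ret;

	*queue_count = nr_io_queues + 1;	/* +1 for the admin queue */
	if (*queue_count < 2)
		return 0;			/* nothing to create */

	printf("creating %u I/O queues\n", nr_io_queues);
	return 0;
}

int main(void)
{
	unsigned int queue_count;

	/* After a CPU unplug only 2 CPUs are online, so only 2 I/O queues are set up. */
	return init_io_queues(8, 2, &queue_count);
}
```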
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 11b0a0a5f661..798653b329b2 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
 	ctrl->sqs[qid] = sq;
 }
 
+static void nvmet_confirm_sq(struct percpu_ref *ref)
+{
+	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
+
+	complete(&sq->confirm_done);
+}
+
 void nvmet_sq_destroy(struct nvmet_sq *sq)
 {
 	/*
@@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
 	 */
 	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
 		nvmet_async_events_free(sq->ctrl);
-	percpu_ref_kill(&sq->ref);
+	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
+	wait_for_completion(&sq->confirm_done);
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
@@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq)
 		return ret;
 	}
 	init_completion(&sq->free_done);
+	init_completion(&sq->confirm_done);
 
 	return 0;
 }
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d1f06e7768ff..22f7bc6bac7f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
 		struct nvme_loop_iod *iod, unsigned int queue_idx)
 {
-	BUG_ON(queue_idx >= ctrl->queue_count);
-
 	iod->req.cmd = &iod->cmd;
 	iod->req.rsp = &iod->rsp;
 	iod->queue = &ctrl->queues[queue_idx];
@@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
-	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 }
 
 static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -314,6 +312,43 @@ free_ctrl:
 	kfree(ctrl);
 }
 
+static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 1; i < ctrl->queue_count; i++)
+		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+}
+
+static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
+{
+	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
+	unsigned int nr_io_queues;
+	int ret, i;
+
+	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
+	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
+	if (ret || !nr_io_queues)
+		return ret;
+
+	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
+
+	for (i = 1; i <= nr_io_queues; i++) {
+		ctrl->queues[i].ctrl = ctrl;
+		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
+		if (ret)
+			goto out_destroy_queues;
+
+		ctrl->queue_count++;
+	}
+
+	return 0;
+
+out_destroy_queues:
+	nvme_loop_destroy_io_queues(ctrl);
+	return ret;
+}
+
 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
 	int error;
@@ -385,17 +420,13 @@ out_free_sq:
 
 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
 {
-	int i;
-
 	nvme_stop_keep_alive(&ctrl->ctrl);
 
 	if (ctrl->queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
 					nvme_cancel_request, &ctrl->ctrl);
-
-		for (i = 1; i < ctrl->queue_count; i++)
-			nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+		nvme_loop_destroy_io_queues(ctrl);
 	}
 
 	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	if (ret)
 		goto out_disable;
 
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_free_queues;
-
-		ctrl->queue_count++;
-	}
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
+		goto out_destroy_admin;
 
-	for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
-			goto out_free_queues;
+			goto out_destroy_io;
 	}
 
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 
 	return;
 
-out_free_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+out_destroy_io:
+	nvme_loop_destroy_io_queues(ctrl);
+out_destroy_admin:
 	nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
 	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
 
 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 {
-	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 	int ret, i;
 
-	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
-	if (ret || !opts->nr_io_queues)
+	ret = nvme_loop_init_io_queues(ctrl);
+	if (ret)
 		return ret;
 
-	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
-		opts->nr_io_queues);
-
-	for (i = 1; i <= opts->nr_io_queues; i++) {
-		ctrl->queues[i].ctrl = ctrl;
-		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
-		if (ret)
-			goto out_destroy_queues;
-
-		ctrl->queue_count++;
-	}
-
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_loop_mq_ops;
 	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
 		goto out_free_tagset;
 	}
 
-	for (i = 1; i <= opts->nr_io_queues; i++) {
+	for (i = 1; i < ctrl->queue_count; i++) {
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ out_cleanup_connect_q:
 out_free_tagset:
 	blk_mq_free_tag_set(&ctrl->tag_set);
 out_destroy_queues:
-	for (i = 1; i < ctrl->queue_count; i++)
-		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	nvme_loop_destroy_io_queues(ctrl);
 	return ret;
 }
 
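
Much of the nvme-loop patch is a refactor: submission-queue setup and teardown move into `nvme_loop_init_io_queues()` / `nvme_loop_destroy_io_queues()`, and every error path unwinds through the destroy helper, which only tears down the queues that were actually brought up (it walks up to `queue_count`, which is incremented as each queue succeeds). The underlying idiom is the usual init-N-things-or-unwind pattern; a minimal generic sketch with illustrative names:

```c
#include <stdio.h>

#define NR_ITEMS 4

static int items_ready;	/* how many items were successfully set up */

static int init_item(int i)
{
	if (i == 2)	/* pretend item 2 fails to set up */
		return -1;
	printf("init %d\n", i);
	return 0;
}

static void destroy_items(void)
{
	/* Only tear down what was actually initialized. */
	for (int i = 0; i < items_ready; i++)
		printf("destroy %d\n", i);
	items_ready = 0;
}

static int init_items(void)
{
	int ret = 0;

	for (int i = 0; i < NR_ITEMS; i++) {
		ret = init_item(i);
		if (ret)
			goto out_destroy;
		items_ready++;	/* mirrors ctrl->queue_count++ in the patch */
	}
	return 0;

out_destroy:
	destroy_items();
	return ret;
}

int main(void)
{
	return init_items() ? 1 : 0;
}
```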
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 1370eee0a3c0..f7ff15f17ca9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -73,6 +73,7 @@ struct nvmet_sq {
 	u16			qid;
 	u16			size;
 	struct completion	free_done;
+	struct completion	confirm_done;
 };
 
 /**
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 9aa1da3778b3..ecc4fe862561 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 {
 	u16 status;
 
-	cmd->queue = queue;
-	cmd->n_rdma = 0;
-	cmd->req.port = queue->port;
-
-
 	ib_dma_sync_single_for_cpu(queue->dev->device,
 		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
 		DMA_FROM_DEVICE);
@@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
 	rsp->req.cmd = cmd->nvme_cmd;
+	rsp->req.port = queue->port;
+	rsp->n_rdma = 0;
 
 	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
 		unsigned long flags;
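
The nvmet-rdma fix moves initialization of the response's queue, port, and RDMA counters up to where the response is taken from the free list in `nvmet_rdma_recv_done()`, because the early-exit path taken when the queue is not yet live hands the response off before `nvmet_rdma_handle_command()` would have filled those fields in. The general pattern is to initialize every field a deferred or error path might read at allocation time, before the first branch that can hand the object off. A hedged generic sketch (all names are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

struct rsp {
	void *queue;	/* must be valid even if we bail out early */
	int   port;
	int   n_rdma;
};

static struct rsp *get_rsp(void *queue, int port)
{
	struct rsp *rsp = malloc(sizeof(*rsp));

	if (!rsp)
		return NULL;
	/* Fill in everything a deferred path might read, right at allocation. */
	rsp->queue = queue;
	rsp->port = port;
	rsp->n_rdma = 0;
	return rsp;
}

static void defer_release(struct rsp *rsp)
{
	/* Dereferences rsp->queue; safe only because it was set at allocation time. */
	printf("deferring rsp on queue %p (n_rdma=%d)\n", rsp->queue, rsp->n_rdma);
	free(rsp);
}

int main(void)
{
	int dummy_queue;
	int queue_live = 0;	/* model a queue that is not yet live */
	struct rsp *rsp = get_rsp(&dummy_queue, 4420);

	if (!rsp)
		return 1;
	if (!queue_live) {	/* early exit: hand the rsp to the deferred path */
		defer_release(rsp);
		return 0;
	}
	free(rsp);
	return 0;
}
```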
