author		Christoph Hellwig <hch@infradead.org>	2011-09-25 14:56:24 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2011-10-23 23:20:43 -0400
commit		dbbf3e94c2b26988d3c41af63e50189e9133eb28 (patch)
tree		4cb6f91946d2229c7eb521fa460bc0c2407107a5
parent		b6b4e61ff4e26d6721b2607ea23402825a38c402 (diff)
target: cleanup iblock bio submission
Move the entire bio allocation, mapping and submission into ->do_task. This a) avoids blocking the I/O submission thread unnecessarily, and b) greatly simplifies the code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
-rw-r--r--	drivers/target/target_core_iblock.c	194
-rw-r--r--	drivers/target/target_core_iblock.h	1
2 files changed, 59 insertions, 136 deletions
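The core of the cleanup is visible in the diff below: the rewritten ->do_task builds its bios onto a bio_list and submits them under a single block-layer plug, instead of chaining them through bi_next in a mapping callback. As a minimal sketch of that pattern (editor's illustration, not part of the patch: example_submit_all() is a hypothetical helper, while bio_list_pop(), blk_start_plug()/blk_finish_plug() and the two-argument submit_bio() are the real block-layer APIs of this kernel generation):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch of the submission pattern the new iblock_do_task() adopts. */
static void example_submit_all(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	/* Plug so the block layer can batch and merge the queued bios... */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	/* ...and unplug once, flushing them to the driver in one go. */
	blk_finish_plug(&plug);
}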
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7f0cc53b4581..dcf93f85977a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -374,45 +374,6 @@ static int iblock_emulated_fua_read(struct se_device *dev)
 	return 0;
 }
 
-static int iblock_do_task(struct se_task *task)
-{
-	struct se_device *dev = task->task_se_cmd->se_dev;
-	struct iblock_req *req = IBLOCK_REQ(task);
-	struct bio *bio = req->ib_bio, *nbio = NULL;
-	struct blk_plug plug;
-	int rw;
-
-	if (task->task_data_direction == DMA_TO_DEVICE) {
-		/*
-		 * Force data to disk if we pretend to not have a volatile
-		 * write cache, or the initiator set the Force Unit Access bit.
-		 */
-		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
-		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-		     task->task_se_cmd->t_tasks_fua))
-			rw = WRITE_FUA;
-		else
-			rw = WRITE;
-	} else {
-		rw = READ;
-	}
-
-	blk_start_plug(&plug);
-	while (bio) {
-		nbio = bio->bi_next;
-		bio->bi_next = NULL;
-		pr_debug("Calling submit_bio() task: %p bio: %p"
-			" bio->bi_sector: %llu\n", task, bio,
-			(unsigned long long)bio->bi_sector);
-
-		submit_bio(rw, bio);
-		bio = nbio;
-	}
-	blk_finish_plug(&plug);
-
-	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
-}
-
 static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
 {
 	struct iblock_dev *ibd = dev->dev_ptr;
@@ -424,20 +385,7 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
 
 static void iblock_free_task(struct se_task *task)
 {
-	struct iblock_req *req = IBLOCK_REQ(task);
-	struct bio *bio, *hbio = req->ib_bio;
-	/*
-	 * We only release the bio(s) here if iblock_bio_done() has not called
-	 * bio_put() -> iblock_bio_destructor().
-	 */
-	while (hbio != NULL) {
-		bio = hbio;
-		hbio = hbio->bi_next;
-		bio->bi_next = NULL;
-		bio_put(bio);
-	}
-
-	kfree(req);
+	kfree(IBLOCK_REQ(task));
 }
 
 enum {
@@ -556,20 +504,16 @@ static void iblock_bio_destructor(struct bio *bio)
 	bio_free(bio, ib_dev->ibd_bio_set);
 }
 
-static struct bio *iblock_get_bio(
-	struct se_task *task,
-	struct iblock_req *ib_req,
-	struct iblock_dev *ib_dev,
-	int *ret,
-	sector_t lba,
-	u32 sg_num)
+static struct bio *
+iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
 {
+	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
+	struct iblock_req *ib_req = IBLOCK_REQ(task);
 	struct bio *bio;
 
 	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
 	if (!bio) {
 		pr_err("Unable to allocate memory for bio\n");
-		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
 		return NULL;
 	}
 
@@ -590,17 +534,33 @@ static struct bio *iblock_get_bio(
 	return bio;
 }
 
-static int iblock_map_data_SG(struct se_task *task)
+static int iblock_do_task(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
-	struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
-	struct iblock_req *ib_req = IBLOCK_REQ(task);
-	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
+	struct bio *bio;
+	struct bio_list list;
 	struct scatterlist *sg;
-	int ret = 0;
 	u32 i, sg_num = task->task_sg_nents;
 	sector_t block_lba;
+	struct blk_plug plug;
+	int rw;
+
+	if (task->task_data_direction == DMA_TO_DEVICE) {
+		/*
+		 * Force data to disk if we pretend to not have a volatile
+		 * write cache, or the initiator set the Force Unit Access bit.
+		 */
+		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+		     task->task_se_cmd->t_tasks_fua))
+			rw = WRITE_FUA;
+		else
+			rw = WRITE;
+	} else {
+		rw = READ;
+	}
+
 	/*
 	 * Do starting conversion up from non 512-byte blocksize with
 	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
@@ -619,63 +579,43 @@ static int iblock_map_data_SG(struct se_task *task)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 
-	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
+	bio = iblock_get_bio(task, block_lba, sg_num);
 	if (!bio)
-		return ret;
+		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+
+	bio_list_init(&list);
+	bio_list_add(&list, bio);
 
-	ib_req->ib_bio = bio;
-	hbio = tbio = bio;
-	/*
-	 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
-	 * from task->task_sg -> struct scatterlist memory.
-	 */
 	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
-		pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
-			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
-				sg->length, sg->offset);
-again:
-		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
-		if (ret != sg->length) {
-
-			pr_debug("*** Set bio->bi_sector: %llu\n",
-				 (unsigned long long)bio->bi_sector);
-			pr_debug("** task->task_size: %u\n",
-					task->task_size);
-			pr_debug("*** bio->bi_max_vecs: %u\n",
-					bio->bi_max_vecs);
-			pr_debug("*** bio->bi_vcnt: %u\n",
-					bio->bi_vcnt);
-
-			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
-						block_lba, sg_num);
+		/*
+		 * XXX: if the length the device accepts is shorter than the
+		 *	length of the S/G list entry this will cause an
+		 *	endless loop.  Better hope no driver uses huge pages.
+		 */
+		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+				!= sg->length) {
+			bio = iblock_get_bio(task, block_lba, sg_num);
 			if (!bio)
 				goto fail;
-
-			tbio = tbio->bi_next = bio;
-			pr_debug("-----------------> Added +1 bio: %p to"
-				" list, Going to again\n", bio);
-			goto again;
+			bio_list_add(&list, bio);
 		}
+
 		/* Always in 512 byte units for Linux/Block */
 		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
 		sg_num--;
-		pr_debug("task: %p bio-add_page() passed!, decremented"
-			" sg_num to %u\n", task, sg_num);
-		pr_debug("task: %p bio_add_page() passed!, increased lba"
-			" to %llu\n", task, (unsigned long long)block_lba);
-		pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
-			" %u\n", task, bio->bi_vcnt);
 	}
 
-	return 0;
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(&list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+
+	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+
 fail:
-	while (hbio) {
-		bio = hbio;
-		hbio = hbio->bi_next;
-		bio->bi_next = NULL;
+	while ((bio = bio_list_pop(&list)))
 		bio_put(bio);
-	}
-	return ret;
+	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
 }
 
 static unsigned char *iblock_get_cdb(struct se_task *task)
@@ -706,6 +646,7 @@ static void iblock_bio_done(struct bio *bio, int err)
 {
 	struct se_task *task = bio->bi_private;
 	struct iblock_req *ibr = IBLOCK_REQ(task);
+
 	/*
 	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
 	 */
@@ -720,41 +661,24 @@ static void iblock_bio_done(struct bio *bio, int err)
 	 */
 		atomic_inc(&ibr->ib_bio_err_cnt);
 		smp_mb__after_atomic_inc();
-		bio_put(bio);
-		/*
-		 * Wait to complete the task until the last bio as completed.
-		 */
-		if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
-			return;
-
-		ibr->ib_bio = NULL;
-		transport_complete_task(task, 0);
-		return;
 	}
-	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
-		task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
-	/*
-	 * bio_put() will call iblock_bio_destructor() to release the bio back
-	 * to ibr->ib_bio_set.
-	 */
+
 	bio_put(bio);
-	/*
-	 * Wait to complete the task until the last bio as completed.
-	 */
+
 	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
 		return;
-	/*
-	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
-	 */
-	ibr->ib_bio = NULL;
-	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
+
+	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+		 task, bio, task->task_lba,
+		 (unsigned long long)bio->bi_sector, err);
+
+	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
 }
 
 static struct se_subsystem_api iblock_template = {
 	.name			= "iblock",
 	.owner			= THIS_MODULE,
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
-	.map_data_SG		= iblock_map_data_SG,
 	.attach_hba		= iblock_attach_hba,
 	.detach_hba		= iblock_detach_hba,
 	.allocate_virtdevice	= iblock_allocate_virtdevice,
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index a121cd1b6575..7a76f663385c 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -11,7 +11,6 @@ struct iblock_req {
 	unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
 	atomic_t ib_bio_cnt;
 	atomic_t ib_bio_err_cnt;
-	struct bio *ib_bio;
 } ____cacheline_aligned;
 
 #define IBDF_HAS_UDEV_PATH	0x01
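
Completion accounting is unchanged in spirit by this patch: each finished bio drops ibr->ib_bio_cnt, and only the last one completes the task. A condensed sketch of what the patched iblock_bio_done() boils down to (editor's illustration within the driver's context, with the BIO_UPTODATE/-EIO handling and the memory barrier simplified away; not the literal driver code):

static void example_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);

	if (err)
		atomic_inc(&ibr->ib_bio_err_cnt);	/* remember any failure */

	bio_put(bio);

	/* Only the final bio completion finishes the task... */
	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
		return;

	/* ...with GOOD status iff no bio recorded an error. */
	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}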