author    Christoph Hellwig <hch@infradead.org>    2011-12-21 14:20:31 -0500
committer Nicholas Bellinger <nab@linux-iscsi.org> 2012-02-25 17:37:46 -0500
commit    d5b4a21b3dc116b477c1b1b493233a73aacbb440 (patch)
tree      f11a6274442cb2e5a1ab6209661dbdd8969eb76e /drivers/target/target_core_iblock.c
parent    6e315a066d7d1917f762d2f2a14e9a71a8656fea (diff)
target: increase iblock task sizes
There is no real limit for task sizes in the iblock driver given that we can
chain bios.  Increase the maximum size to UINT_MAX, and change the code to
submit bios in smaller batches to avoid deadlocks when more bios are in
flight than the pool supports.  Also increase the pool size to always allow
multiple tasks to be in flight.

I also had to change the task refcounting to include one reference for the
submission path, which is standard practice for this kind of code in Linux
(e.g. XFS I/O submission).  This was wrong before, but couldn't be hit
easily.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
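[Note: a minimal userspace sketch of the refcounting pattern described above,
assuming nothing beyond C11 atomics.  struct request, submit_unit(),
unit_done() and complete_request() are hypothetical stand-ins for the
driver's bio submission and completion paths, not kernel API.]

/*
 * The submitter starts with pending = 1 so the request cannot be
 * completed while units are still being allocated and queued; each
 * submitted unit takes a reference, each completion drops one, and the
 * submitter drops its own reference last.  Whoever brings the count to
 * zero performs the completion, closing the race that exists when the
 * count starts at 0.
 */
#include <stdatomic.h>
#include <stdio.h>

struct request {
	atomic_int pending;
};

static void complete_request(struct request *req)
{
	printf("request %p complete\n", (void *)req);
}

/* Called once per finished unit of I/O (the bio end_io analogue). */
static void unit_done(struct request *req)
{
	if (atomic_fetch_sub(&req->pending, 1) == 1)
		complete_request(req);
}

static void submit_unit(struct request *req, int i)
{
	atomic_fetch_add(&req->pending, 1);	/* reference for this unit */
	printf("submitted unit %d\n", i);
	unit_done(req);		/* inline "completion" for the demo */
}

int main(void)
{
	struct request req;

	atomic_init(&req.pending, 1);	/* the submitter's own reference */

	for (int i = 0; i < 4; i++)
		submit_unit(&req, i);

	/* Drop the submission reference; completes if all units are done. */
	unit_done(&req);
	return 0;
}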
Diffstat (limited to 'drivers/target/target_core_iblock.c')
-rw-r--r--  drivers/target/target_core_iblock.c | 52
1 file changed, 36 insertions(+), 16 deletions(-)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 562e787b31d0..2ec299e8a73e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -46,6 +46,9 @@
 
 #include "target_core_iblock.h"
 
+#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
+#define IBLOCK_BIO_POOL_SIZE	128
+
 static struct se_subsystem_api iblock_template;
 
 static void iblock_bio_done(struct bio *, int);
@@ -100,10 +103,8 @@ static struct se_device *iblock_create_virtdevice(
 		return ERR_PTR(ret);
 	}
 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
-	/*
-	 * These settings need to be made tunable..
-	 */
-	ib_dev->ibd_bio_set = bioset_create(32, 0);
+
+	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
 	if (!ib_dev->ibd_bio_set) {
 		pr_err("IBLOCK: Unable to create bioset()\n");
 		return ERR_PTR(-ENOMEM);
@@ -129,8 +130,8 @@ static struct se_device *iblock_create_virtdevice(
 	q = bdev_get_queue(bd);
 	limits = &dev_limits.limits;
 	limits->logical_block_size = bdev_logical_block_size(bd);
-	limits->max_hw_sectors = queue_max_hw_sectors(q);
-	limits->max_sectors = queue_max_sectors(q);
+	limits->max_hw_sectors = UINT_MAX;
+	limits->max_sectors = UINT_MAX;
 	dev_limits.hw_queue_depth = q->nr_requests;
 	dev_limits.queue_depth = q->nr_requests;
 
@@ -204,7 +205,7 @@ iblock_alloc_task(unsigned char *cdb)
 		return NULL;
 	}
 
-	atomic_set(&ib_req->ib_bio_cnt, 0);
+	atomic_set(&ib_req->pending, 1);
 	return &ib_req->ib_task;
 }
 
@@ -484,24 +485,35 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
 	bio->bi_destructor = iblock_bio_destructor;
 	bio->bi_end_io = &iblock_bio_done;
 	bio->bi_sector = lba;
-	atomic_inc(&ib_req->ib_bio_cnt);
+	atomic_inc(&ib_req->pending);
 
 	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
-	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
-			atomic_read(&ib_req->ib_bio_cnt));
+	pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
 	return bio;
 }
 
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+	struct blk_plug plug;
+	struct bio *bio;
+
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+}
+
 static int iblock_do_task(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct se_device *dev = cmd->se_dev;
+	struct iblock_req *ibr = IBLOCK_REQ(task);
 	struct bio *bio;
 	struct bio_list list;
 	struct scatterlist *sg;
 	u32 i, sg_num = task->task_sg_nents;
 	sector_t block_lba;
-	struct blk_plug plug;
+	unsigned bio_cnt;
 	int rw;
 
 	if (task->task_data_direction == DMA_TO_DEVICE) {
@@ -546,6 +558,7 @@ static int iblock_do_task(struct se_task *task)
 
 	bio_list_init(&list);
 	bio_list_add(&list, bio);
+	bio_cnt = 1;
 
 	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
 		/*
@@ -555,10 +568,16 @@ static int iblock_do_task(struct se_task *task)
 		 */
 		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
 				!= sg->length) {
+			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
+				iblock_submit_bios(&list, rw);
+				bio_cnt = 0;
+			}
+
 			bio = iblock_get_bio(task, block_lba, sg_num);
 			if (!bio)
 				goto fail;
 			bio_list_add(&list, bio);
+			bio_cnt++;
 		}
 
 		/* Always in 512 byte units for Linux/Block */
@@ -566,11 +585,12 @@ static int iblock_do_task(struct se_task *task)
 		sg_num--;
 	}
 
-	blk_start_plug(&plug);
-	while ((bio = bio_list_pop(&list)))
-		submit_bio(rw, bio);
-	blk_finish_plug(&plug);
+	iblock_submit_bios(&list, rw);
 
+	if (atomic_dec_and_test(&ibr->pending)) {
+		transport_complete_task(task,
+				!atomic_read(&ibr->ib_bio_err_cnt));
+	}
 	return 0;
 
 fail:
@@ -622,7 +642,7 @@ static void iblock_bio_done(struct bio *bio, int err)
 
 	bio_put(bio);
 
-	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
+	if (!atomic_dec_and_test(&ibr->pending))
 		return;
 
 	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
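
[Note: a hedged userspace model of the deadlock the batching in
iblock_do_task() avoids.  The bio pool is reduced to a counter, and
pool_alloc()/flush() are illustrative stand-ins for mempool-backed bio
allocation and iblock_submit_bios() plus the completions that return bios
to the pool; none of this is kernel API.]

#include <stdio.h>

#define MAX_PER_BATCH	32	/* mirrors IBLOCK_MAX_BIO_PER_TASK */
#define POOL_SIZE	128	/* mirrors IBLOCK_BIO_POOL_SIZE */

static int pool_in_use;

static int pool_alloc(void)
{
	if (pool_in_use >= POOL_SIZE)
		return -1;	/* would block (and deadlock) in the kernel */
	pool_in_use++;
	return 0;
}

static void flush(int *batched)
{
	/* Models submitting the batch: completions return units to the pool. */
	pool_in_use -= *batched;
	printf("flushed %d units\n", *batched);
	*batched = 0;
}

int main(void)
{
	int batched = 0;

	/* A "task" needing far more units than the pool holds. */
	for (int i = 0; i < 1000; i++) {
		if (batched >= MAX_PER_BATCH)
			flush(&batched);	/* cap what we hold at once */
		if (pool_alloc() < 0) {
			fprintf(stderr, "pool exhausted\n");
			return 1;
		}
		batched++;
	}
	flush(&batched);
	return 0;
}

Because the task never holds more than MAX_PER_BATCH allocations before
flushing, it can never pin the whole pool while waiting for more.  In the
real driver each flush also brackets the submit_bio() calls with
blk_start_plug()/blk_finish_plug(), so batching does not give up request
merging.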