-rw-r--r--	drivers/target/target_core_iblock.c	167
1 file changed, 81 insertions(+), 86 deletions(-)
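The diff below appears to be pure code movement: iblock_complete_cmd(), iblock_bio_done(), iblock_get_bio() and iblock_submit_bios() are relocated above their first callers, so the forward declarations for them can be deleted, leaving the file five lines shorter. As a minimal sketch of why the reorder pays off (every name in it is invented for illustration, nothing here is from the patch): C only requires a declaration before first use, so a helper defined above its caller needs no separate forward declaration.

/*
 * Illustration only: bar() is defined before its caller, so the
 * "static void bar(int);" forward declaration that a caller-first
 * layout would force (the analogue of the iblock_bio_done()
 * declaration this patch deletes) is unnecessary.
 */
#include <stdio.h>

static void bar(int x)		/* defined before first use ... */
{
	printf("bar(%d)\n", x);
}

static void foo(void)		/* ... so foo() can call it directly */
{
	bar(1);
}

int main(void)
{
	foo();
	return 0;
}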
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 16f48e1c8cf4..34f258b4d37f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -55,8 +55,6 @@ static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
 
 static struct se_subsystem_api iblock_template;
 
-static void iblock_bio_done(struct bio *, int);
-
 /* iblock_attach_hba(): (Part of se_subsystem_api_t template)
  *
  *
@@ -255,6 +253,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 	return blocks_long;
 }
 
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+	struct iblock_req *ibr = cmd->priv;
+	u8 status;
+
+	if (!atomic_dec_and_test(&ibr->pending))
+		return;
+
+	if (atomic_read(&ibr->ib_bio_err_cnt))
+		status = SAM_STAT_CHECK_CONDITION;
+	else
+		status = SAM_STAT_GOOD;
+
+	target_complete_cmd(cmd, status);
+	kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+	struct iblock_req *ibr = cmd->priv;
+
+	/*
+	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
+	 */
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+		err = -EIO;
+
+	if (err != 0) {
+		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+			" err: %d\n", bio, err);
+		/*
+		 * Bump the ib_bio_err_cnt and release bio.
+		 */
+		atomic_inc(&ibr->ib_bio_err_cnt);
+		smp_mb__after_atomic_inc();
+	}
+
+	bio_put(bio);
+
+	iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+	struct bio *bio;
+
+	/*
+	 * Only allocate as many vector entries as the bio code allows us to,
+	 * we'll loop later on until we have handled the whole request.
+	 */
+	if (sg_num > BIO_MAX_PAGES)
+		sg_num = BIO_MAX_PAGES;
+
+	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+	if (!bio) {
+		pr_err("Unable to allocate memory for bio\n");
+		return NULL;
+	}
+
+	bio->bi_bdev = ib_dev->ibd_bd;
+	bio->bi_private = cmd;
+	bio->bi_end_io = &iblock_bio_done;
+	bio->bi_sector = lba;
+
+	return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+	struct blk_plug plug;
+	struct bio *bio;
+
+	blk_start_plug(&plug);
+	while ((bio = bio_list_pop(list)))
+		submit_bio(rw, bio);
+	blk_finish_plug(&plug);
+}
+
 static void iblock_end_io_flush(struct bio *bio, int err)
 {
 	struct se_cmd *cmd = bio->bi_private;
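A note on the completion path just moved up: all bios issued for a command share a single iblock_req, iblock_bio_done() drops ibr->pending once per bio and records any failure in ib_bio_err_cnt, and only the decrement that hits zero reaches target_complete_cmd(), so the command completes exactly once no matter how many bios it was split into. The sketch below is a userspace analogue of that last-one-out pattern using C11 atomics and pthreads; struct request, complete_request() and the extra submitter reference are illustrative assumptions, not kernel API.

/*
 * Userspace analogue of the iblock completion scheme.  Everything here
 * (struct request, complete_request(), bio_done()) is invented for
 * illustration; it is not kernel API.  Each worker stands in for one
 * bio completion: it drops req->pending, and only the drop that
 * reaches zero reports the final status, exactly once.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct request {
	atomic_int pending;	/* completions still outstanding */
	atomic_int err_cnt;	/* how many "bios" failed */
};

static void complete_request(struct request *req)
{
	/* atomic_fetch_sub() returns the old value, so 1 means "last
	 * one out", mirroring atomic_dec_and_test() in
	 * iblock_complete_cmd(). */
	if (atomic_fetch_sub(&req->pending, 1) != 1)
		return;

	printf("request done, status: %s\n",
	       atomic_load(&req->err_cnt) ? "CHECK CONDITION" : "GOOD");
}

static void *bio_done(void *arg)
{
	struct request *req = arg;

	/* A real end_io callback would bump req->err_cnt on failure here. */
	complete_request(req);
	return NULL;
}

int main(void)
{
	enum { NBIOS = 4 };
	/* Preload pending with one extra reference for the submitter, an
	 * assumption modeled on the driver keeping the command alive
	 * until every bio has been queued. */
	struct request req = { .pending = NBIOS + 1 };
	pthread_t t[NBIOS];

	for (int i = 0; i < NBIOS; i++)
		pthread_create(&t[i], NULL, bio_done, &req);

	complete_request(&req);	/* submitter drops its own reference */

	for (int i = 0; i < NBIOS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Running it prints a single "request done" line regardless of thread interleaving, which is exactly the property the pending counter buys the driver.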
@@ -377,10 +456,6 @@ err:
 	return ret;
 }
 
-static struct bio *iblock_get_bio(struct se_cmd *, sector_t, u32);
-static void iblock_submit_bios(struct bio_list *, int);
-static void iblock_complete_cmd(struct se_cmd *);
-
 static sense_reason_t
 iblock_execute_write_same_unmap(struct se_cmd *cmd)
 {
@@ -565,60 +640,6 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 	return bl;
 }
 
-static void iblock_complete_cmd(struct se_cmd *cmd)
-{
-	struct iblock_req *ibr = cmd->priv;
-	u8 status;
-
-	if (!atomic_dec_and_test(&ibr->pending))
-		return;
-
-	if (atomic_read(&ibr->ib_bio_err_cnt))
-		status = SAM_STAT_CHECK_CONDITION;
-	else
-		status = SAM_STAT_GOOD;
-
-	target_complete_cmd(cmd, status);
-	kfree(ibr);
-}
-
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
-{
-	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
-	struct bio *bio;
-
-	/*
-	 * Only allocate as many vector entries as the bio code allows us to,
-	 * we'll loop later on until we have handled the whole request.
-	 */
-	if (sg_num > BIO_MAX_PAGES)
-		sg_num = BIO_MAX_PAGES;
-
-	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
-	if (!bio) {
-		pr_err("Unable to allocate memory for bio\n");
-		return NULL;
-	}
-
-	bio->bi_bdev = ib_dev->ibd_bd;
-	bio->bi_private = cmd;
-	bio->bi_end_io = &iblock_bio_done;
-	bio->bi_sector = lba;
-	return bio;
-}
-
-static void iblock_submit_bios(struct bio_list *list, int rw)
-{
-	struct blk_plug plug;
-	struct bio *bio;
-
-	blk_start_plug(&plug);
-	while ((bio = bio_list_pop(list)))
-		submit_bio(rw, bio);
-	blk_finish_plug(&plug);
-}
-
 static sense_reason_t
 iblock_execute_rw(struct se_cmd *cmd)
 {
@@ -739,32 +760,6 @@ static sector_t iblock_get_blocks(struct se_device *dev)
 	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
 }
 
-static void iblock_bio_done(struct bio *bio, int err)
-{
-	struct se_cmd *cmd = bio->bi_private;
-	struct iblock_req *ibr = cmd->priv;
-
-	/*
-	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
-	 */
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
-		err = -EIO;
-
-	if (err != 0) {
-		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
-			" err: %d\n", bio, err);
-		/*
-		 * Bump the ib_bio_err_cnt and release bio.
-		 */
-		atomic_inc(&ibr->ib_bio_err_cnt);
-		smp_mb__after_atomic_inc();
-	}
-
-	bio_put(bio);
-
-	iblock_complete_cmd(cmd);
-}
-
 static struct sbc_ops iblock_sbc_ops = {
 	.execute_rw		= iblock_execute_rw,
 	.execute_sync_cache	= iblock_execute_sync_cache,
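Two smaller idioms in the relocated helpers are worth spelling out. iblock_get_bio() clamps sg_num to BIO_MAX_PAGES and, per its comment, leaves the caller to loop until the whole request is mapped; iblock_submit_bios() then drains the accumulated bio_list inside a blk_start_plug()/blk_finish_plug() pair so the block layer can batch the queued bios. A standalone sketch of the clamp-and-loop idiom follows; MAX_PER_BATCH and map_request() are invented stand-ins for BIO_MAX_PAGES and the driver's mapping loop.

/*
 * Illustration of the clamp-and-loop idiom; MAX_PER_BATCH stands in
 * for BIO_MAX_PAGES and map_request() for the caller that keeps
 * allocating bios until the whole request is covered.  Not kernel code.
 */
#include <stdio.h>

#define MAX_PER_BATCH	8	/* stand-in for BIO_MAX_PAGES */

static void map_request(unsigned int total_segs)
{
	unsigned int done = 0;

	while (done < total_segs) {
		unsigned int n = total_segs - done;

		/* Same cap iblock_get_bio() applies to sg_num. */
		if (n > MAX_PER_BATCH)
			n = MAX_PER_BATCH;

		printf("batch: segments %u..%u\n", done, done + n - 1);
		done += n;
	}
}

int main(void)
{
	map_request(19);	/* emits batches of 8, 8 and 3 */
	return 0;
}

With 19 segments this emits batches of 8, 8 and 3, the same way a large command is carved into BIO_MAX_PAGES-sized bios.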
