aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/target/target_core_iblock.c
diff options
context:
space:
mode:
authorAndy Grover <agrover@redhat.com>2011-06-08 13:36:43 -0400
committerNicholas Bellinger <nab@linux-iscsi.org>2011-07-22 05:37:48 -0400
commit6708bb27bb2703da238f21f516034263348af5be (patch)
treea23e1f9eab22933d773d6b6ad6263d6751379a00 /drivers/target/target_core_iblock.c
parentec98f7825c6eaa4a9afb0eb518826efc8a2ed4a2 (diff)
target: Follow up core updates from AGrover and HCH (round 4)
This patch contains the squashed version of forth round series cleanups from Andy and Christoph following the post heavy lifting in the preceeding: 'Eliminate usage of struct se_mem' and 'Make all control CDBs scatter-gather' changes. This also includes a conversion of target core and the v3.0 mainline fabric modules (loopback and tcm_fc) to use pr_debug and the CONFIG_DYNAMIC_DEBUG infrastructure! These have been squashed into this third and final round for v3.1. target: Remove ifdeffed code in t_g_process_write target: Remove direct ramdisk code target: Rename task_sg_num to task_sg_nents target: Remove custom debug macros for pr_debug. Use pr_err(). target: Remove custom debug macros in mainline fabrics target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0 target: Remove transport do_se_mem_map callback target: Further simplify transport_free_pages target: Redo task allocation return value handling target: Remove extra parentheses target: change alloc_task call to take *cdb, not *cmd (nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev) Signed-off-by: Andy Grover <agrover@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target/target_core_iblock.c')
-rw-r--r--drivers/target/target_core_iblock.c116
1 file changed, 55 insertions, 61 deletions
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 164b72106b88..251fc66a8212 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -47,12 +47,6 @@
47 47
48#include "target_core_iblock.h" 48#include "target_core_iblock.h"
49 49
50#if 0
51#define DEBUG_IBLOCK(x...) printk(x)
52#else
53#define DEBUG_IBLOCK(x...)
54#endif
55
56static struct se_subsystem_api iblock_template; 50static struct se_subsystem_api iblock_template;
57 51
58static void iblock_bio_done(struct bio *, int); 52static void iblock_bio_done(struct bio *, int);
@@ -66,8 +60,8 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
66 struct iblock_hba *ib_host; 60 struct iblock_hba *ib_host;
67 61
68 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); 62 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
69 if (!(ib_host)) { 63 if (!ib_host) {
70 printk(KERN_ERR "Unable to allocate memory for" 64 pr_err("Unable to allocate memory for"
71 " struct iblock_hba\n"); 65 " struct iblock_hba\n");
72 return -ENOMEM; 66 return -ENOMEM;
73 } 67 }
@@ -76,11 +70,11 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
76 70
77 hba->hba_ptr = ib_host; 71 hba->hba_ptr = ib_host;
78 72
79 printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" 73 pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
80 " Generic Target Core Stack %s\n", hba->hba_id, 74 " Generic Target Core Stack %s\n", hba->hba_id,
81 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); 75 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
82 76
83 printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", 77 pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
84 hba->hba_id, ib_host->iblock_host_id); 78 hba->hba_id, ib_host->iblock_host_id);
85 79
86 return 0; 80 return 0;
@@ -90,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba)
90{ 84{
91 struct iblock_hba *ib_host = hba->hba_ptr; 85 struct iblock_hba *ib_host = hba->hba_ptr;
92 86
93 printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" 87 pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
94 " Target Core\n", hba->hba_id, ib_host->iblock_host_id); 88 " Target Core\n", hba->hba_id, ib_host->iblock_host_id);
95 89
96 kfree(ib_host); 90 kfree(ib_host);
@@ -103,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
103 struct iblock_hba *ib_host = hba->hba_ptr; 97 struct iblock_hba *ib_host = hba->hba_ptr;
104 98
105 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); 99 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
106 if (!(ib_dev)) { 100 if (!ib_dev) {
107 printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); 101 pr_err("Unable to allocate struct iblock_dev\n");
108 return NULL; 102 return NULL;
109 } 103 }
110 ib_dev->ibd_host = ib_host; 104 ib_dev->ibd_host = ib_host;
111 105
112 printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); 106 pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
113 107
114 return ib_dev; 108 return ib_dev;
115} 109}
@@ -128,8 +122,8 @@ static struct se_device *iblock_create_virtdevice(
128 u32 dev_flags = 0; 122 u32 dev_flags = 0;
129 int ret = -EINVAL; 123 int ret = -EINVAL;
130 124
131 if (!(ib_dev)) { 125 if (!ib_dev) {
132 printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); 126 pr_err("Unable to locate struct iblock_dev parameter\n");
133 return ERR_PTR(ret); 127 return ERR_PTR(ret);
134 } 128 }
135 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 129 memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@@ -137,16 +131,16 @@ static struct se_device *iblock_create_virtdevice(
137 * These settings need to be made tunable.. 131 * These settings need to be made tunable..
138 */ 132 */
139 ib_dev->ibd_bio_set = bioset_create(32, 64); 133 ib_dev->ibd_bio_set = bioset_create(32, 64);
140 if (!(ib_dev->ibd_bio_set)) { 134 if (!ib_dev->ibd_bio_set) {
141 printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); 135 pr_err("IBLOCK: Unable to create bioset()\n");
142 return ERR_PTR(-ENOMEM); 136 return ERR_PTR(-ENOMEM);
143 } 137 }
144 printk(KERN_INFO "IBLOCK: Created bio_set()\n"); 138 pr_debug("IBLOCK: Created bio_set()\n");
145 /* 139 /*
146 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 140 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
147 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 141 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
148 */ 142 */
149 printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", 143 pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
150 ib_dev->ibd_udev_path); 144 ib_dev->ibd_udev_path);
151 145
152 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 146 bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@@ -172,7 +166,7 @@ static struct se_device *iblock_create_virtdevice(
172 dev = transport_add_device_to_core_hba(hba, 166 dev = transport_add_device_to_core_hba(hba,
173 &iblock_template, se_dev, dev_flags, ib_dev, 167 &iblock_template, se_dev, dev_flags, ib_dev,
174 &dev_limits, "IBLOCK", IBLOCK_VERSION); 168 &dev_limits, "IBLOCK", IBLOCK_VERSION);
175 if (!(dev)) 169 if (!dev)
176 goto failed; 170 goto failed;
177 171
178 /* 172 /*
@@ -192,7 +186,7 @@ static struct se_device *iblock_create_virtdevice(
192 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 186 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
193 q->limits.discard_alignment; 187 q->limits.discard_alignment;
194 188
195 printk(KERN_INFO "IBLOCK: BLOCK Discard support available," 189 pr_debug("IBLOCK: BLOCK Discard support available,"
196 " disabled by default\n"); 190 " disabled by default\n");
197 } 191 }
198 192
@@ -227,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
227} 221}
228 222
229static struct se_task * 223static struct se_task *
230iblock_alloc_task(struct se_cmd *cmd) 224iblock_alloc_task(unsigned char *cdb)
231{ 225{
232 struct iblock_req *ib_req; 226 struct iblock_req *ib_req;
233 227
234 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 228 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
235 if (!(ib_req)) { 229 if (!ib_req) {
236 printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n"); 230 pr_err("Unable to allocate memory for struct iblock_req\n");
237 return NULL; 231 return NULL;
238 } 232 }
239 233
240 ib_req->ib_dev = cmd->se_dev->dev_ptr;
241 atomic_set(&ib_req->ib_bio_cnt, 0); 234 atomic_set(&ib_req->ib_bio_cnt, 0);
242 return &ib_req->ib_task; 235 return &ib_req->ib_task;
243} 236}
@@ -345,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
345 */ 338 */
346 ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); 339 ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
347 if (ret != 0) { 340 if (ret != 0) {
348 printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d " 341 pr_err("IBLOCK: block_issue_flush() failed: %d "
349 " error_sector: %llu\n", ret, 342 " error_sector: %llu\n", ret,
350 (unsigned long long)error_sector); 343 (unsigned long long)error_sector);
351 } 344 }
@@ -409,8 +402,9 @@ static int iblock_do_task(struct se_task *task)
409 while (bio) { 402 while (bio) {
410 nbio = bio->bi_next; 403 nbio = bio->bi_next;
411 bio->bi_next = NULL; 404 bio->bi_next = NULL;
412 DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" 405 pr_debug("Calling submit_bio() task: %p bio: %p"
413 " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); 406 " bio->bi_sector: %llu\n", task, bio,
407 (unsigned long long)bio->bi_sector);
414 408
415 submit_bio(rw, bio); 409 submit_bio(rw, bio);
416 bio = nbio; 410 bio = nbio;
@@ -480,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
480 switch (token) { 474 switch (token) {
481 case Opt_udev_path: 475 case Opt_udev_path:
482 if (ib_dev->ibd_bd) { 476 if (ib_dev->ibd_bd) {
483 printk(KERN_ERR "Unable to set udev_path= while" 477 pr_err("Unable to set udev_path= while"
484 " ib_dev->ibd_bd exists\n"); 478 " ib_dev->ibd_bd exists\n");
485 ret = -EEXIST; 479 ret = -EEXIST;
486 goto out; 480 goto out;
@@ -493,7 +487,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
493 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, 487 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
494 "%s", arg_p); 488 "%s", arg_p);
495 kfree(arg_p); 489 kfree(arg_p);
496 printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", 490 pr_debug("IBLOCK: Referencing UDEV path: %s\n",
497 ib_dev->ibd_udev_path); 491 ib_dev->ibd_udev_path);
498 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; 492 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
499 break; 493 break;
@@ -516,7 +510,7 @@ static ssize_t iblock_check_configfs_dev_params(
516 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 510 struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
517 511
518 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { 512 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
519 printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); 513 pr_err("Missing udev_path= parameters for IBLOCK\n");
520 return -EINVAL; 514 return -EINVAL;
521 } 515 }
522 516
@@ -574,15 +568,15 @@ static struct bio *iblock_get_bio(
574 struct bio *bio; 568 struct bio *bio;
575 569
576 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 570 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
577 if (!(bio)) { 571 if (!bio) {
578 printk(KERN_ERR "Unable to allocate memory for bio\n"); 572 pr_err("Unable to allocate memory for bio\n");
579 *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 573 *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
580 return NULL; 574 return NULL;
581 } 575 }
582 576
583 DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" 577 pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
584 " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); 578 " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
585 DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); 579 pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
586 580
587 bio->bi_bdev = ib_dev->ibd_bd; 581 bio->bi_bdev = ib_dev->ibd_bd;
588 bio->bi_private = task; 582 bio->bi_private = task;
@@ -591,8 +585,8 @@ static struct bio *iblock_get_bio(
591 bio->bi_sector = lba; 585 bio->bi_sector = lba;
592 atomic_inc(&ib_req->ib_bio_cnt); 586 atomic_inc(&ib_req->ib_bio_cnt);
593 587
594 DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); 588 pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
595 DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", 589 pr_debug("Set ib_req->ib_bio_cnt: %d\n",
596 atomic_read(&ib_req->ib_bio_cnt)); 590 atomic_read(&ib_req->ib_bio_cnt));
597 return bio; 591 return bio;
598} 592}
@@ -606,7 +600,7 @@ static int iblock_map_task_SG(struct se_task *task)
606 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; 600 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
607 struct scatterlist *sg; 601 struct scatterlist *sg;
608 int ret = 0; 602 int ret = 0;
609 u32 i, sg_num = task->task_sg_num; 603 u32 i, sg_num = task->task_sg_nents;
610 sector_t block_lba; 604 sector_t block_lba;
611 /* 605 /*
612 * Do starting conversion up from non 512-byte blocksize with 606 * Do starting conversion up from non 512-byte blocksize with
@@ -621,13 +615,13 @@ static int iblock_map_task_SG(struct se_task *task)
621 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) 615 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
622 block_lba = task->task_lba; 616 block_lba = task->task_lba;
623 else { 617 else {
624 printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" 618 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
625 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 619 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
626 return PYX_TRANSPORT_LU_COMM_FAILURE; 620 return PYX_TRANSPORT_LU_COMM_FAILURE;
627 } 621 }
628 622
629 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); 623 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
630 if (!(bio)) 624 if (!bio)
631 return ret; 625 return ret;
632 626
633 ib_req->ib_bio = bio; 627 ib_req->ib_bio = bio;
@@ -636,41 +630,41 @@ static int iblock_map_task_SG(struct se_task *task)
636 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist 630 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
637 * from task->task_sg -> struct scatterlist memory. 631 * from task->task_sg -> struct scatterlist memory.
638 */ 632 */
639 for_each_sg(task->task_sg, sg, task->task_sg_num, i) { 633 for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
640 DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" 634 pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
641 " %p len: %u offset: %u\n", task, bio, sg_page(sg), 635 " %p len: %u offset: %u\n", task, bio, sg_page(sg),
642 sg->length, sg->offset); 636 sg->length, sg->offset);
643again: 637again:
644 ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); 638 ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
645 if (ret != sg->length) { 639 if (ret != sg->length) {
646 640
647 DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", 641 pr_debug("*** Set bio->bi_sector: %llu\n",
648 bio->bi_sector); 642 (unsigned long long)bio->bi_sector);
649 DEBUG_IBLOCK("** task->task_size: %u\n", 643 pr_debug("** task->task_size: %u\n",
650 task->task_size); 644 task->task_size);
651 DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", 645 pr_debug("*** bio->bi_max_vecs: %u\n",
652 bio->bi_max_vecs); 646 bio->bi_max_vecs);
653 DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", 647 pr_debug("*** bio->bi_vcnt: %u\n",
654 bio->bi_vcnt); 648 bio->bi_vcnt);
655 649
656 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, 650 bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
657 block_lba, sg_num); 651 block_lba, sg_num);
658 if (!(bio)) 652 if (!bio)
659 goto fail; 653 goto fail;
660 654
661 tbio = tbio->bi_next = bio; 655 tbio = tbio->bi_next = bio;
662 DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" 656 pr_debug("-----------------> Added +1 bio: %p to"
663 " list, Going to again\n", bio); 657 " list, Going to again\n", bio);
664 goto again; 658 goto again;
665 } 659 }
666 /* Always in 512 byte units for Linux/Block */ 660 /* Always in 512 byte units for Linux/Block */
667 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 661 block_lba += sg->length >> IBLOCK_LBA_SHIFT;
668 sg_num--; 662 sg_num--;
669 DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented" 663 pr_debug("task: %p bio-add_page() passed!, decremented"
670 " sg_num to %u\n", task, sg_num); 664 " sg_num to %u\n", task, sg_num);
671 DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" 665 pr_debug("task: %p bio_add_page() passed!, increased lba"
672 " to %llu\n", task, block_lba); 666 " to %llu\n", task, (unsigned long long)block_lba);
673 DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" 667 pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
674 " %u\n", task, bio->bi_vcnt); 668 " %u\n", task, bio->bi_vcnt);
675 } 669 }
676 670
@@ -716,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err)
716 /* 710 /*
717 * Set -EIO if !BIO_UPTODATE and the passed is still err=0 711 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
718 */ 712 */
719 if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) 713 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
720 err = -EIO; 714 err = -EIO;
721 715
722 if (err != 0) { 716 if (err != 0) {
723 printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," 717 pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
724 " err: %d\n", bio, err); 718 " err: %d\n", bio, err);
725 /* 719 /*
726 * Bump the ib_bio_err_cnt and release bio. 720 * Bump the ib_bio_err_cnt and release bio.
@@ -731,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err)
731 /* 725 /*
732 * Wait to complete the task until the last bio as completed. 726 * Wait to complete the task until the last bio as completed.
733 */ 727 */
734 if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) 728 if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
735 return; 729 return;
736 730
737 ibr->ib_bio = NULL; 731 ibr->ib_bio = NULL;
738 transport_complete_task(task, 0); 732 transport_complete_task(task, 0);
739 return; 733 return;
740 } 734 }
741 DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", 735 pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
742 task, bio, task->task_lba, bio->bi_sector, err); 736 task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
743 /* 737 /*
744 * bio_put() will call iblock_bio_destructor() to release the bio back 738 * bio_put() will call iblock_bio_destructor() to release the bio back
745 * to ibr->ib_bio_set. 739 * to ibr->ib_bio_set.
@@ -748,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err)
748 /* 742 /*
749 * Wait to complete the task until the last bio as completed. 743 * Wait to complete the task until the last bio as completed.
750 */ 744 */
751 if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) 745 if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
752 return; 746 return;
753 /* 747 /*
754 * Return GOOD status for task if zero ib_bio_err_cnt exists. 748 * Return GOOD status for task if zero ib_bio_err_cnt exists.