Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/drbd/drbd_actlog.c    |  8
-rw-r--r--  drivers/block/drbd/drbd_int.h       |  4
-rw-r--r--  drivers/block/drbd/drbd_main.c      |  6
-rw-r--r--  drivers/block/drbd/drbd_nl.c        | 27
-rw-r--r--  drivers/block/drbd/drbd_receiver.c  | 18
-rw-r--r--  drivers/block/drbd/drbd_req.c       |  8
-rw-r--r--  drivers/block/drbd/drbd_worker.c    | 12
7 files changed, 41 insertions(+), 42 deletions(-)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index b3f18545b469..b4adb58c7472 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -952,7 +952,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
         int wake_up = 0;
         unsigned long flags;
 
-        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                 dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
                                 (unsigned long long)sector, size);
                 return;
@@ -1002,7 +1002,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 /*
  * this is intended to set one request worth of data out of sync.
  * affects at least 1 bit,
- * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
+ * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
  *
  * called by tl_clear and drbd_send_dblock (==drbd_make_request).
  * so this can be _any_ process.
@@ -1015,7 +1015,7 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
         unsigned int enr, count;
         struct lc_element *e;
 
-        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                 dev_err(DEV, "sector: %llus, size: %d\n",
                         (unsigned long long)sector, size);
                 return;
@@ -1387,7 +1387,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
         sector_t esector, nr_sectors;
         int wake_up = 0;
 
-        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                 dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
                         (unsigned long long)sector, size);
                 return;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 85207b275e41..bcba2742cfba 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -512,7 +512,7 @@ struct p_sizes {
         u64 d_size; /* size of disk */
         u64 u_size; /* user requested size */
         u64 c_size; /* current exported size */
-        u32 max_segment_size; /* Maximal size of a BIO */
+        u32 max_bio_size; /* Maximal size of a BIO */
         u16 queue_order_type; /* not yet implemented in DRBD*/
         u16 dds_flags; /* use enum dds_flags here. */
 } __packed;
@@ -1398,7 +1398,7 @@ struct bm_extent {
  * With a value of 8 all IO in one 128K block make it to the same slot of the
  * hash table. */
 #define HT_SHIFT 8
-#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))
+#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
 
 #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
 
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 451fc36a85cb..9d9c2ed31e9a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1924,7 +1924,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
         p.d_size = cpu_to_be64(d_size);
         p.u_size = cpu_to_be64(u_size);
         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
-        p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
+        p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
         p.queue_order_type = cpu_to_be16(q_order_type);
         p.dds_flags = cpu_to_be16(flags);
 
@@ -2952,7 +2952,7 @@ static void drbd_destroy_mempools(void)
 static int drbd_create_mempools(void)
 {
         struct page *page;
-        const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
+        const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
         int i;
 
         /* prepare our caches and mempools */
@@ -3218,7 +3218,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
         q->backing_dev_info.congested_data = mdev;
 
         blk_queue_make_request(q, drbd_make_request_26);
-        blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
+        blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
         blk_queue_merge_bvec(q, drbd_merge_bvec);
         q->queue_lock = &mdev->req_lock;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index dad559810ed6..9e27d82a9a19 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -765,22 +765,21 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
         return 0;
 }
 
-void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
+void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local)
 {
         struct request_queue * const q = mdev->rq_queue;
         struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
         int max_segments = mdev->ldev->dc.max_bio_bvecs;
+        int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
 
-        max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
-
-        blk_queue_max_hw_sectors(q, max_seg_s >> 9);
-        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
-        blk_queue_max_segment_size(q, max_seg_s);
         blk_queue_logical_block_size(q, 512);
-        blk_queue_segment_boundary(q, PAGE_SIZE-1);
-        blk_stack_limits(&q->limits, &b->limits, 0);
+        blk_queue_max_hw_sectors(q, max_hw_sectors);
+        /* This is the workaround for "bio would need to, but cannot, be split" */
+        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
+        blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+        blk_queue_stack_limits(q, b);
 
-        dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));
+        dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9);
 
         if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
                 dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
@@ -858,7 +857,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
         struct block_device *bdev;
         struct lru_cache *resync_lru = NULL;
         union drbd_state ns, os;
-        unsigned int max_seg_s;
+        unsigned int max_bio_size;
         int rv;
         int cp_discovered = 0;
         int logical_block_size;
@@ -1109,20 +1108,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
         mdev->read_cnt = 0;
         mdev->writ_cnt = 0;
 
-        max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+        max_bio_size = DRBD_MAX_BIO_SIZE;
         if (mdev->state.conn == C_CONNECTED) {
                 /* We are Primary, Connected, and now attach a new local
                  * backing store. We must not increase the user visible maximum
                  * bio size on this device to something the peer may not be
                  * able to handle. */
                 if (mdev->agreed_pro_version < 94)
-                        max_seg_s = queue_max_segment_size(mdev->rq_queue);
+                        max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
                 else if (mdev->agreed_pro_version == 94)
-                        max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
+                        max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
                 /* else: drbd 8.3.9 and later, stay with default */
         }
 
-        drbd_setup_queue_param(mdev, max_seg_s);
+        drbd_setup_queue_param(mdev, max_bio_size);
 
         /* If I am currently not R_PRIMARY,
          * but meta data primary indicator is set,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ca213c6e5f9d..79e7b57006b1 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -277,7 +277,7 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
         atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
         int i;
 
-        if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
+        if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
                 i = page_chain_free(page);
         else {
                 struct page *tmp;
@@ -1240,7 +1240,7 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
         data_size -= dgs;
 
         ERR_IF(data_size & 0x1ff) return NULL;
-        ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
+        ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
 
         /* even though we trust out peer,
          * we sometimes have to double check. */
@@ -1917,7 +1917,7 @@ static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, un
         sector = be64_to_cpu(p->sector);
         size = be32_to_cpu(p->blksize);
 
-        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+        if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
                                 (unsigned long long)sector, size);
                 return FALSE;
@@ -2897,7 +2897,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 {
         struct p_sizes *p = &mdev->data.rbuf.sizes;
         enum determine_dev_size dd = unchanged;
-        unsigned int max_seg_s;
+        unsigned int max_bio_size;
         sector_t p_size, p_usize, my_usize;
         int ldsc = 0; /* local disk size changed */
         enum dds_flags ddsf;
@@ -2970,14 +2970,14 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
         }
 
         if (mdev->agreed_pro_version < 94)
-                max_seg_s = be32_to_cpu(p->max_segment_size);
+                max_bio_size = be32_to_cpu(p->max_bio_size);
         else if (mdev->agreed_pro_version == 94)
-                max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
+                max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
         else /* drbd 8.3.8 onwards */
-                max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+                max_bio_size = DRBD_MAX_BIO_SIZE;
 
-        if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
-                drbd_setup_queue_param(mdev, max_seg_s);
+        if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
+                drbd_setup_queue_param(mdev, max_bio_size);
 
         drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
         put_ldev(mdev);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index ad3fc6228f27..08f53ce9b88f 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1047,7 +1047,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 
         /* can this bio be split generically?
          * Maybe add our own split-arbitrary-bios function. */
-        if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_SEGMENT_SIZE) {
+        if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
                 /* rather error out here than BUG in bio_split */
                 dev_err(DEV, "bio would need to, but cannot, be split: "
                     "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
@@ -1098,7 +1098,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 }
 
 /* This is called by bio_add_page(). With this function we reduce
- * the number of BIOs that span over multiple DRBD_MAX_SEGMENT_SIZEs
+ * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
  * units (was AL_EXTENTs).
  *
  * we do the calculation within the lower 32bit of the byte offsets,
@@ -1118,8 +1118,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
         unsigned int bio_size = bvm->bi_size;
         int limit, backing_limit;
 
-        limit = DRBD_MAX_SEGMENT_SIZE
-              - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
+        limit = DRBD_MAX_BIO_SIZE
+              - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
         if (limit < 0)
                 limit = 0;
         if (bio_size == 0) {
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index af805efc94d7..782d87237cb4 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -524,7 +524,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
         unsigned long bit;
         sector_t sector;
         const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-        int max_segment_size;
+        int max_bio_size;
         int number, rollback_i, size;
         int align, queued, sndbuf;
         int i = 0;
@@ -559,9 +559,9 @@ int w_make_resync_request(struct drbd_conf *mdev,
 
         /* starting with drbd 8.3.8, we can handle multi-bio EEs,
          * if it should be necessary */
-        max_segment_size =
-                mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
-                mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;
+        max_bio_size =
+                mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
+                mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
 
         number = drbd_rs_number_requests(mdev);
         if (number == 0)
@@ -605,7 +605,7 @@ next_sector:
                 goto next_sector;
         }
 
-#if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
+#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
         /* try to find some adjacent bits.
          * we stop if we have already the maximum req size.
          *
@@ -615,7 +615,7 @@ next_sector:
         align = 1;
         rollback_i = i;
         for (;;) {
-                if (size + BM_BLOCK_SIZE > max_segment_size)
+                if (size + BM_BLOCK_SIZE > max_bio_size)
                         break;
 
                 /* Be always aligned */