diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-05-08 22:49:35 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-05-08 22:49:35 -0400 |
commit | 1daac193f21d6e3d0adc528a06a7e11522d4254d (patch) | |
tree | 4034f896bc92bc3568c0e9bc1cd1df0af884d625 | |
parent | 41c64bb19c740b5433f768032ecaf05375c955ee (diff) | |
parent | 0ff28d9f4674d781e492bcff6f32f0fe48cf0fed (diff) |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"A collection of fixes since the merge window:
- fix for a double elevator module release, from Chao Yu. Ancient bug.
- the splice() MORE flag fix from Christophe Leroy.
- a fix for NVMe, fixing a patch that went in in the merge window.
From Keith.
- two fixes for blk-mq CPU hotplug handling, from Ming Lei.
- bdi vs blockdev lifetime fix from Neil Brown, fixing an oops in md.
- two blk-mq fixes from Shaohua, fixing a race on queue stop and a
bad merge issue with FUA writes.
- division-by-zero fix for writeback from Tejun.
- a block bounce page accounting fix, making sure we inc/dec after
bouncing so that pre/post IO pages match up. From Wang YanQing"
* 'for-linus' of git://git.kernel.dk/linux-block:
splice: sendfile() at once fails for big files
blk-mq: don't lose requests if a stopped queue restarts
blk-mq: fix FUA request hang
block: destroy bdi before blockdev is unregistered.
block:bounce: fix call inc_|dec_zone_page_state on different pages confuse value of NR_BOUNCE
elevator: fix double release of elevator module
writeback: use |1 instead of +1 to protect against div by zero
blk-mq: fix CPU hotplug handling
blk-mq: fix race between timeout and CPU hotplug
NVMe: Fix VPD B0 max sectors translation
-rw-r--r-- | block/blk-core.c | 2 | ||||
-rw-r--r-- | block/blk-mq.c | 60 | ||||
-rw-r--r-- | block/blk-sysfs.c | 2 | ||||
-rw-r--r-- | block/bounce.c | 2 | ||||
-rw-r--r-- | block/elevator.c | 6 | ||||
-rw-r--r-- | drivers/block/loop.c | 2 | ||||
-rw-r--r-- | drivers/block/nvme-scsi.c | 3 | ||||
-rw-r--r-- | drivers/md/md.c | 4 | ||||
-rw-r--r-- | fs/splice.c | 12 | ||||
-rw-r--r-- | include/linux/blk_types.h | 2 | ||||
-rw-r--r-- | mm/page-writeback.c | 6 |
11 files changed, 60 insertions, 41 deletions
diff --git a/block/blk-core.c b/block/blk-core.c index fd154b94447a..7871603f0a29 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q) | |||
552 | q->queue_lock = &q->__queue_lock; | 552 | q->queue_lock = &q->__queue_lock; |
553 | spin_unlock_irq(lock); | 553 | spin_unlock_irq(lock); |
554 | 554 | ||
555 | bdi_destroy(&q->backing_dev_info); | ||
556 | |||
555 | /* @q is and will stay empty, shutdown and put */ | 557 | /* @q is and will stay empty, shutdown and put */ |
556 | blk_put_queue(q); | 558 | blk_put_queue(q); |
557 | } | 559 | } |
diff --git a/block/blk-mq.c b/block/blk-mq.c index ade8a2d1b0aa..e68b71b85a7e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv) | |||
677 | data.next = blk_rq_timeout(round_jiffies_up(data.next)); | 677 | data.next = blk_rq_timeout(round_jiffies_up(data.next)); |
678 | mod_timer(&q->timeout, data.next); | 678 | mod_timer(&q->timeout, data.next); |
679 | } else { | 679 | } else { |
680 | queue_for_each_hw_ctx(q, hctx, i) | 680 | queue_for_each_hw_ctx(q, hctx, i) { |
681 | blk_mq_tag_idle(hctx); | 681 | /* the hctx may be unmapped, so check it here */ |
682 | if (blk_mq_hw_queue_mapped(hctx)) | ||
683 | blk_mq_tag_idle(hctx); | ||
684 | } | ||
682 | } | 685 | } |
683 | } | 686 | } |
684 | 687 | ||
@@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) | |||
855 | spin_lock(&hctx->lock); | 858 | spin_lock(&hctx->lock); |
856 | list_splice(&rq_list, &hctx->dispatch); | 859 | list_splice(&rq_list, &hctx->dispatch); |
857 | spin_unlock(&hctx->lock); | 860 | spin_unlock(&hctx->lock); |
861 | /* | ||
862 | * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but | ||
863 | * it's possible the queue is stopped and restarted again | ||
864 | * before this. Queue restart will dispatch requests. And since | ||
865 | * requests in rq_list aren't added into hctx->dispatch yet, | ||
866 | * the requests in rq_list might get lost. | ||
867 | * | ||
868 | * blk_mq_run_hw_queue() already checks the STOPPED bit | ||
869 | **/ | ||
870 | blk_mq_run_hw_queue(hctx, true); | ||
858 | } | 871 | } |
859 | } | 872 | } |
860 | 873 | ||
@@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) | |||
1571 | return NOTIFY_OK; | 1584 | return NOTIFY_OK; |
1572 | } | 1585 | } |
1573 | 1586 | ||
1574 | static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu) | ||
1575 | { | ||
1576 | struct request_queue *q = hctx->queue; | ||
1577 | struct blk_mq_tag_set *set = q->tag_set; | ||
1578 | |||
1579 | if (set->tags[hctx->queue_num]) | ||
1580 | return NOTIFY_OK; | ||
1581 | |||
1582 | set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num); | ||
1583 | if (!set->tags[hctx->queue_num]) | ||
1584 | return NOTIFY_STOP; | ||
1585 | |||
1586 | hctx->tags = set->tags[hctx->queue_num]; | ||
1587 | return NOTIFY_OK; | ||
1588 | } | ||
1589 | |||
1590 | static int blk_mq_hctx_notify(void *data, unsigned long action, | 1587 | static int blk_mq_hctx_notify(void *data, unsigned long action, |
1591 | unsigned int cpu) | 1588 | unsigned int cpu) |
1592 | { | 1589 | { |
@@ -1594,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action, | |||
1594 | 1591 | ||
1595 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) | 1592 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) |
1596 | return blk_mq_hctx_cpu_offline(hctx, cpu); | 1593 | return blk_mq_hctx_cpu_offline(hctx, cpu); |
1597 | else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) | 1594 | |
1598 | return blk_mq_hctx_cpu_online(hctx, cpu); | 1595 | /* |
1596 | * In case of CPU online, tags may be reallocated | ||
1597 | * in blk_mq_map_swqueue() after mapping is updated. | ||
1598 | */ | ||
1599 | 1599 | ||
1600 | return NOTIFY_OK; | 1600 | return NOTIFY_OK; |
1601 | } | 1601 | } |
@@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
1775 | unsigned int i; | 1775 | unsigned int i; |
1776 | struct blk_mq_hw_ctx *hctx; | 1776 | struct blk_mq_hw_ctx *hctx; |
1777 | struct blk_mq_ctx *ctx; | 1777 | struct blk_mq_ctx *ctx; |
1778 | struct blk_mq_tag_set *set = q->tag_set; | ||
1778 | 1779 | ||
1779 | queue_for_each_hw_ctx(q, hctx, i) { | 1780 | queue_for_each_hw_ctx(q, hctx, i) { |
1780 | cpumask_clear(hctx->cpumask); | 1781 | cpumask_clear(hctx->cpumask); |
@@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
1803 | * disable it and free the request entries. | 1804 | * disable it and free the request entries. |
1804 | */ | 1805 | */ |
1805 | if (!hctx->nr_ctx) { | 1806 | if (!hctx->nr_ctx) { |
1806 | struct blk_mq_tag_set *set = q->tag_set; | ||
1807 | |||
1808 | if (set->tags[i]) { | 1807 | if (set->tags[i]) { |
1809 | blk_mq_free_rq_map(set, set->tags[i], i); | 1808 | blk_mq_free_rq_map(set, set->tags[i], i); |
1810 | set->tags[i] = NULL; | 1809 | set->tags[i] = NULL; |
1811 | hctx->tags = NULL; | ||
1812 | } | 1810 | } |
1811 | hctx->tags = NULL; | ||
1813 | continue; | 1812 | continue; |
1814 | } | 1813 | } |
1815 | 1814 | ||
1815 | /* unmapped hw queue can be remapped after CPU topo changed */ | ||
1816 | if (!set->tags[i]) | ||
1817 | set->tags[i] = blk_mq_init_rq_map(set, i); | ||
1818 | hctx->tags = set->tags[i]; | ||
1819 | WARN_ON(!hctx->tags); | ||
1820 | |||
1816 | /* | 1821 | /* |
1817 | * Set the map size to the number of mapped software queues. | 1822 | * Set the map size to the number of mapped software queues. |
1818 | * This is more accurate and more efficient than looping | 1823 | * This is more accurate and more efficient than looping |
@@ -2090,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
2090 | */ | 2095 | */ |
2091 | list_for_each_entry(q, &all_q_list, all_q_node) | 2096 | list_for_each_entry(q, &all_q_list, all_q_node) |
2092 | blk_mq_freeze_queue_start(q); | 2097 | blk_mq_freeze_queue_start(q); |
2093 | list_for_each_entry(q, &all_q_list, all_q_node) | 2098 | list_for_each_entry(q, &all_q_list, all_q_node) { |
2094 | blk_mq_freeze_queue_wait(q); | 2099 | blk_mq_freeze_queue_wait(q); |
2095 | 2100 | ||
2101 | /* | ||
2102 | * timeout handler can't touch hw queue during the | ||
2103 | * reinitialization | ||
2104 | */ | ||
2105 | del_timer_sync(&q->timeout); | ||
2106 | } | ||
2107 | |||
2096 | list_for_each_entry(q, &all_q_list, all_q_node) | 2108 | list_for_each_entry(q, &all_q_list, all_q_node) |
2097 | blk_mq_queue_reinit(q); | 2109 | blk_mq_queue_reinit(q); |
2098 | 2110 | ||
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index faaf36ade7eb..2b8fd302f677 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj) | |||
522 | 522 | ||
523 | blk_trace_shutdown(q); | 523 | blk_trace_shutdown(q); |
524 | 524 | ||
525 | bdi_destroy(&q->backing_dev_info); | ||
526 | |||
527 | ida_simple_remove(&blk_queue_ida, q->id); | 525 | ida_simple_remove(&blk_queue_ida, q->id); |
528 | call_rcu(&q->rcu_head, blk_free_queue_rcu); | 526 | call_rcu(&q->rcu_head, blk_free_queue_rcu); |
529 | } | 527 | } |
diff --git a/block/bounce.c b/block/bounce.c index ab21ba203d5c..ed9dd8067120 100644 --- a/block/bounce.c +++ b/block/bounce.c | |||
@@ -221,8 +221,8 @@ bounce: | |||
221 | if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) | 221 | if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) |
222 | continue; | 222 | continue; |
223 | 223 | ||
224 | inc_zone_page_state(to->bv_page, NR_BOUNCE); | ||
225 | to->bv_page = mempool_alloc(pool, q->bounce_gfp); | 224 | to->bv_page = mempool_alloc(pool, q->bounce_gfp); |
225 | inc_zone_page_state(to->bv_page, NR_BOUNCE); | ||
226 | 226 | ||
227 | if (rw == WRITE) { | 227 | if (rw == WRITE) { |
228 | char *vto, *vfrom; | 228 | char *vto, *vfrom; |
diff --git a/block/elevator.c b/block/elevator.c index 59794d0d38e3..8985038f398c 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q, | |||
157 | 157 | ||
158 | eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); | 158 | eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); |
159 | if (unlikely(!eq)) | 159 | if (unlikely(!eq)) |
160 | goto err; | 160 | return NULL; |
161 | 161 | ||
162 | eq->type = e; | 162 | eq->type = e; |
163 | kobject_init(&eq->kobj, &elv_ktype); | 163 | kobject_init(&eq->kobj, &elv_ktype); |
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q, | |||
165 | hash_init(eq->hash); | 165 | hash_init(eq->hash); |
166 | 166 | ||
167 | return eq; | 167 | return eq; |
168 | err: | ||
169 | kfree(eq); | ||
170 | elevator_put(e); | ||
171 | return NULL; | ||
172 | } | 168 | } |
173 | EXPORT_SYMBOL(elevator_alloc); | 169 | EXPORT_SYMBOL(elevator_alloc); |
174 | 170 | ||
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ae3fcb4199e9..d7173cb1ea76 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1620,8 +1620,8 @@ out: | |||
1620 | 1620 | ||
1621 | static void loop_remove(struct loop_device *lo) | 1621 | static void loop_remove(struct loop_device *lo) |
1622 | { | 1622 | { |
1623 | del_gendisk(lo->lo_disk); | ||
1624 | blk_cleanup_queue(lo->lo_queue); | 1623 | blk_cleanup_queue(lo->lo_queue); |
1624 | del_gendisk(lo->lo_disk); | ||
1625 | blk_mq_free_tag_set(&lo->tag_set); | 1625 | blk_mq_free_tag_set(&lo->tag_set); |
1626 | put_disk(lo->lo_disk); | 1626 | put_disk(lo->lo_disk); |
1627 | kfree(lo); | 1627 | kfree(lo); |
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index 6b736b00f63e..88f13c525712 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c | |||
@@ -944,7 +944,8 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | |||
944 | static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | 944 | static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
945 | u8 *inq_response, int alloc_len) | 945 | u8 *inq_response, int alloc_len) |
946 | { | 946 | { |
947 | __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue)); | 947 | __be32 max_sectors = cpu_to_be32( |
948 | nvme_block_nr(ns, queue_max_hw_sectors(ns->queue))); | ||
948 | __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); | 949 | __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); |
949 | __be32 discard_desc_count = cpu_to_be32(0x100); | 950 | __be32 discard_desc_count = cpu_to_be32(0x100); |
950 | 951 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index d4f31e195e26..593a02476c78 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -4818,12 +4818,12 @@ static void md_free(struct kobject *ko) | |||
4818 | if (mddev->sysfs_state) | 4818 | if (mddev->sysfs_state) |
4819 | sysfs_put(mddev->sysfs_state); | 4819 | sysfs_put(mddev->sysfs_state); |
4820 | 4820 | ||
4821 | if (mddev->queue) | ||
4822 | blk_cleanup_queue(mddev->queue); | ||
4821 | if (mddev->gendisk) { | 4823 | if (mddev->gendisk) { |
4822 | del_gendisk(mddev->gendisk); | 4824 | del_gendisk(mddev->gendisk); |
4823 | put_disk(mddev->gendisk); | 4825 | put_disk(mddev->gendisk); |
4824 | } | 4826 | } |
4825 | if (mddev->queue) | ||
4826 | blk_cleanup_queue(mddev->queue); | ||
4827 | 4827 | ||
4828 | kfree(mddev); | 4828 | kfree(mddev); |
4829 | } | 4829 | } |
diff --git a/fs/splice.c b/fs/splice.c index 476024bb6546..bfe62ae40f40 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -1161,7 +1161,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, | |||
1161 | long ret, bytes; | 1161 | long ret, bytes; |
1162 | umode_t i_mode; | 1162 | umode_t i_mode; |
1163 | size_t len; | 1163 | size_t len; |
1164 | int i, flags; | 1164 | int i, flags, more; |
1165 | 1165 | ||
1166 | /* | 1166 | /* |
1167 | * We require the input being a regular file, as we don't want to | 1167 | * We require the input being a regular file, as we don't want to |
@@ -1204,6 +1204,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, | |||
1204 | * Don't block on output, we have to drain the direct pipe. | 1204 | * Don't block on output, we have to drain the direct pipe. |
1205 | */ | 1205 | */ |
1206 | sd->flags &= ~SPLICE_F_NONBLOCK; | 1206 | sd->flags &= ~SPLICE_F_NONBLOCK; |
1207 | more = sd->flags & SPLICE_F_MORE; | ||
1207 | 1208 | ||
1208 | while (len) { | 1209 | while (len) { |
1209 | size_t read_len; | 1210 | size_t read_len; |
@@ -1217,6 +1218,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, | |||
1217 | sd->total_len = read_len; | 1218 | sd->total_len = read_len; |
1218 | 1219 | ||
1219 | /* | 1220 | /* |
1221 | * If more data is pending, set SPLICE_F_MORE | ||
1222 | * If this is the last data and SPLICE_F_MORE was not set | ||
1223 | * initially, clears it. | ||
1224 | */ | ||
1225 | if (read_len < len) | ||
1226 | sd->flags |= SPLICE_F_MORE; | ||
1227 | else if (!more) | ||
1228 | sd->flags &= ~SPLICE_F_MORE; | ||
1229 | /* | ||
1220 | * NOTE: nonblocking mode only applies to the input. We | 1230 | * NOTE: nonblocking mode only applies to the input. We |
1221 | * must not do the output in nonblocking mode as then we | 1231 | * must not do the output in nonblocking mode as then we |
1222 | * could get stuck data in the internal pipe: | 1232 | * could get stuck data in the internal pipe: |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index a1b25e35ea5f..b7299febc4b4 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -220,7 +220,7 @@ enum rq_flag_bits { | |||
220 | 220 | ||
221 | /* This mask is used for both bio and request merge checking */ | 221 | /* This mask is used for both bio and request merge checking */ |
222 | #define REQ_NOMERGE_FLAGS \ | 222 | #define REQ_NOMERGE_FLAGS \ |
223 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) | 223 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ) |
224 | 224 | ||
225 | #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) | 225 | #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) |
226 | #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) | 226 | #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5daf5568b9e1..eb59f7eea508 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint, | |||
580 | long x; | 580 | long x; |
581 | 581 | ||
582 | x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, | 582 | x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, |
583 | limit - setpoint + 1); | 583 | (limit - setpoint) | 1); |
584 | pos_ratio = x; | 584 | pos_ratio = x; |
585 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; | 585 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; |
586 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; | 586 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; |
@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, | |||
807 | * scale global setpoint to bdi's: | 807 | * scale global setpoint to bdi's: |
808 | * bdi_setpoint = setpoint * bdi_thresh / thresh | 808 | * bdi_setpoint = setpoint * bdi_thresh / thresh |
809 | */ | 809 | */ |
810 | x = div_u64((u64)bdi_thresh << 16, thresh + 1); | 810 | x = div_u64((u64)bdi_thresh << 16, thresh | 1); |
811 | bdi_setpoint = setpoint * (u64)x >> 16; | 811 | bdi_setpoint = setpoint * (u64)x >> 16; |
812 | /* | 812 | /* |
813 | * Use span=(8*write_bw) in single bdi case as indicated by | 813 | * Use span=(8*write_bw) in single bdi case as indicated by |
@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, | |||
822 | 822 | ||
823 | if (bdi_dirty < x_intercept - span / 4) { | 823 | if (bdi_dirty < x_intercept - span / 4) { |
824 | pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), | 824 | pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), |
825 | x_intercept - bdi_setpoint + 1); | 825 | (x_intercept - bdi_setpoint) | 1); |
826 | } else | 826 | } else |
827 | pos_ratio /= 4; | 827 | pos_ratio /= 4; |
828 | 828 | ||